APIUtilityVerifier BooleanVerifier
/**
 * Unregistering a handler that was previously registered under the same
 * identifier must report success (return {@code true}).
 */
@Test public void testUnregistrationReturnValue(){
  RefreshHandler handler=Mockito.mock(RefreshHandler.class);
  RefreshRegistry.defaultRegistry().register("test",handler);
  // The unregister call should find and remove the exact handler instance.
  assertTrue(RefreshRegistry.defaultRegistry().unregister("test",handler));
}
APIUtilityVerifier TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier
// Brings up an 8-datanode MiniDFSCluster spread over four racks before each
// test, wires in the HDFS policy provider, and captures handles to the
// namenode address, current user and the cluster filesystem.
@Before @Override public void setUp() throws Exception {
super.setUp();
// Use the HDFS service-level authorization policies for this cluster.
conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,HDFSPolicyProvider.class,PolicyProvider.class);
// Single replica keeps block placement deterministic for the test.
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,1);
// Topology: 2 hosts on rack1, 3 on rack2, 1 on rack3, 2 on rack4.
String[] racks={"/rack1","/rack1","/rack2","/rack2","/rack2","/rack3","/rack4","/rack4"};
String[] hosts={"host1","host2","host3","host4","host5","host6","host7","host8"};
dfsCluster=new MiniDFSCluster.Builder(conf).numDataNodes(8).racks(racks).hosts(hosts).build();
// Block until all datanodes have registered with the namenode.
dfsCluster.waitClusterUp();
namenode=conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY,"file:///");
username=System.getProperty("user.name");
fs=dfsCluster.getFileSystem();
// Sanity check: the cluster must hand back a DistributedFileSystem, not a local FS.
assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Serializes the test configuration to XML via ConfServlet, parses the
 * result back with a DOM parser, and verifies that TEST_KEY appears with
 * the value TEST_VAL.
 */
@Test public void testWriteXml() throws Exception {
  StringWriter writer=new StringWriter();
  ConfServlet.writeResponse(getTestConf(),writer,"xml");
  DocumentBuilder parser=DocumentBuilderFactory.newInstance().newDocumentBuilder();
  Document doc=parser.parse(new InputSource(new StringReader(writer.toString())));
  NodeList names=doc.getElementsByTagName("name");
  boolean found=false;
  // Scan every <name> element; when TEST_KEY shows up, check the sibling <value>.
  for (int idx=0; idx < names.getLength(); idx++) {
    Node node=names.item(idx);
    String key=node.getTextContent();
    System.err.println("xml key: " + key);
    if (TEST_KEY.equals(key)) {
      found=true;
      Element property=(Element)node.getParentNode();
      assertEquals(TEST_VAL,property.getElementsByTagName("value").item(0).getTextContent());
    }
  }
  assertTrue(found);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
* Run a set of threads making changes to the deprecations
* concurrently with another set of threads calling get()
* and set() on Configuration objects.
*/
@SuppressWarnings("deprecation") @Test(timeout=60000) public void testConcurrentDeprecateAndManipulate() throws Exception {
final int NUM_THREAD_IDS=10;
final int NUM_KEYS_PER_THREAD=1000;
ScheduledThreadPoolExecutor executor=new ScheduledThreadPoolExecutor(2 * NUM_THREAD_IDS,new ThreadFactoryBuilder().setDaemon(true).setNameFormat("testConcurrentDeprecateAndManipulate modification thread %d").build());
final CountDownLatch latch=new CountDownLatch(1);
final AtomicInteger highestModificationThreadId=new AtomicInteger(1);
List> futures=new LinkedList>();
for (int i=0; i < NUM_THREAD_IDS; i++) {
futures.add(executor.schedule(new Callable(){
@Override public Void call() throws Exception {
latch.await();
int threadIndex=highestModificationThreadId.addAndGet(1);
for (int i=0; i < NUM_KEYS_PER_THREAD; i++) {
String testKey=getTestKeyName(threadIndex,i);
String testNewKey=testKey + ".new";
Configuration.addDeprecations(new DeprecationDelta[]{new DeprecationDelta(testKey,testNewKey)});
}
return null;
}
}
,0,TimeUnit.SECONDS));
}
final AtomicInteger highestAccessThreadId=new AtomicInteger(1);
for (int i=0; i < NUM_THREAD_IDS; i++) {
futures.add(executor.schedule(new Callable(){
@Override public Void call() throws Exception {
Configuration conf=new Configuration();
latch.await();
int threadIndex=highestAccessThreadId.addAndGet(1);
for (int i=0; i < NUM_KEYS_PER_THREAD; i++) {
String testNewKey=getTestKeyName(threadIndex,i) + ".new";
String value="value." + threadIndex + "."+ i;
conf.set(testNewKey,value);
Assert.assertEquals(value,conf.get(testNewKey));
}
return null;
}
}
,0,TimeUnit.SECONDS));
}
latch.countDown();
for ( Future future : futures) {
Uninterruptibles.getUninterruptibly(future);
}
}
APIUtilityVerifier BooleanVerifier
/**
 * Test ReconfigurationUtil.getChangedProperties.
 *
 * Expects exactly three differences between conf2 and conf1: PROP2 changed
 * from VAL1 to VAL2, PROP3 was unset (newVal == null), and PROP4 was newly
 * set to VAL1.
 *
 * Fix: restored the stripped generic type on the raw {@code Collection}
 * declaration.
 */
@Test public void testGetChangedProperties(){
  Collection<ReconfigurationUtil.PropertyChange> changes=ReconfigurationUtil.getChangedProperties(conf2,conf1);
  assertTrue("expected 3 changed properties but got " + changes.size(),changes.size() == 3);
  boolean changeFound=false;
  boolean unsetFound=false;
  boolean setFound=false;
  for ( ReconfigurationUtil.PropertyChange c : changes) {
    // PROP2: value changed VAL1 -> VAL2.
    if (c.prop.equals(PROP2) && c.oldVal != null && c.oldVal.equals(VAL1) && c.newVal != null && c.newVal.equals(VAL2)) {
      changeFound=true;
    }
    // PROP3: property removed (newVal is null).
    else if (c.prop.equals(PROP3) && c.oldVal != null && c.oldVal.equals(VAL1) && c.newVal == null) {
      unsetFound=true;
    }
    // PROP4: property newly added (oldVal is null).
    else if (c.prop.equals(PROP4) && c.oldVal == null && c.newVal != null && c.newVal.equals(VAL1)) {
      setFound=true;
    }
  }
  assertTrue("not all changes have been applied",changeFound && unsetFound && setFound);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test HA failover, where BK, as the shared storage, fails.
 * Once it becomes available again, a standby can come up.
 * Verify that any write happening after the BK fail is not
 * available on the standby.
 */
@Test public void testFailoverWithFailingBKCluster() throws Exception {
int ensembleSize=numBookies + 1;
// Add one extra bookie so the ensemble/quorum size exactly matches the
// number of live bookies; losing it later makes the ensemble unsatisfiable.
BookieServer newBookie=bkutil.newBookie();
assertEquals("New bookie didn't start",ensembleSize,bkutil.checkBookiesUp(ensembleSize,10));
BookieServer replacementBookie=null;
MiniDFSCluster cluster=null;
try {
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,BKJMUtil.createJournalURI("/hotfailoverWithFail").toString());
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,ensembleSize);
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,ensembleSize);
BKJMUtil.addJournalManagerDefinition(conf);
// checkExitOnShutdown(false): the NN is expected to abort when the shared
// journal becomes unwritable, and that must not kill the test JVM.
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).manageNameDfsSharedDirs(false).checkExitOnShutdown(false).build();
NameNode nn1=cluster.getNameNode(0);
NameNode nn2=cluster.getNameNode(1);
cluster.waitActive();
cluster.transitionToActive(0);
Path p1=new Path("/testBKJMFailingBKCluster1");
Path p2=new Path("/testBKJMFailingBKCluster2");
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
// p1 is written while BK is healthy; it must survive the failover.
fs.mkdirs(p1);
// Kill the extra bookie so the configured ensemble can no longer be formed.
newBookie.shutdown();
assertEquals("New bookie didn't stop",numBookies,bkutil.checkBookiesUp(numBookies,10));
try {
// p2 is written after the BK failure; the active NN should exit instead
// of acknowledging a write it cannot journal.
fs.mkdirs(p2);
fail("mkdirs should result in the NN exiting");
}
 catch ( RemoteException re) {
assertTrue(re.getClassName().contains("ExitException"));
}
cluster.shutdownNameNode(0);
try {
// With bookies still down, the standby cannot open a new log segment
// on the required shared journal and must refuse to become active.
cluster.transitionToActive(1);
fail("Shouldn't have been able to transition with bookies down");
}
 catch ( ExitException ee) {
assertTrue("Should shutdown due to required journal failure",ee.getMessage().contains("starting log segment 3 failed for required journal"));
}
// Restore BK capacity; now the standby can come up as active.
replacementBookie=bkutil.newBookie();
assertEquals("Replacement bookie didn't start",ensembleSize,bkutil.checkBookiesUp(ensembleSize,10));
cluster.transitionToActive(1);
// Only the pre-failure write is visible; the failed write never landed.
assertTrue(fs.exists(p1));
assertFalse(fs.exists(p2));
}
 finally {
newBookie.shutdown();
if (replacementBookie != null) {
replacementBookie.shutdown();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test simple HA failover usecase with BK: write a directory through the
 * active namenode, shut it down, fail over to the standby, and verify the
 * directory is still visible.
 */
@Test public void testFailoverWithBK() throws Exception {
  MiniDFSCluster cluster=null;
  try {
    Configuration conf=new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,BKJMUtil.createJournalURI("/hotfailover").toString());
    BKJMUtil.addJournalManagerDefinition(conf);
    cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).manageNameDfsSharedDirs(false).build();
    NameNode activeNN=cluster.getNameNode(0);
    NameNode standbyNN=cluster.getNameNode(1);
    cluster.waitActive();
    cluster.transitionToActive(0);
    Path dir=new Path("/testBKJMfailover");
    FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
    fs.mkdirs(dir);
    // Kill the active and promote the standby; the edit written above must
    // have been tailed from the shared BK journal.
    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    assertTrue(fs.exists(dir));
  }
 finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests that the edit log file meta data reading from ZooKeeper should be
 * able to handle the NoNodeException. bkjm.getInputStream(fromTxId,
 * inProgressOk) should suppress the NoNodeException and continue. HDFS-3441.
 *
 * Fix: restored the stripped generic type on the raw {@code List}
 * declaration for the ledger metadata list.
 */
@Test public void testEditLogFileNotExistsWhenReadingMetadata() throws Exception {
  URI uri=BKJMUtil.createJournalURI("/hdfsjournal-editlogfile");
  NamespaceInfo nsi=newNSInfo();
  BookKeeperJournalManager bkjm=new BookKeeperJournalManager(conf,uri,nsi);
  bkjm.format(nsi);
  try {
    // Two finalized segments: txids 1-50 and 51-100.
    String zkpath1=startAndFinalizeLogSegment(bkjm,1,50);
    String zkpath2=startAndFinalizeLogSegment(bkjm,51,100);
    // Spy on ZK so reads of the second segment's znode fail with NoNode.
    ZooKeeper zkspy=spy(BKJMUtil.connectZooKeeper());
    bkjm.setZooKeeper(zkspy);
    Mockito.doThrow(new KeeperException.NoNodeException(zkpath2 + " doesn't exists")).when(zkspy).getData(zkpath2,false,null);
    // The missing znode must be skipped, not propagated as a failure.
    List<EditLogLedgerMetadata> ledgerList=bkjm.getLedgerList(false);
    assertEquals("List contains the metadata of non exists path.",1,ledgerList.size());
    assertEquals("LogLedgerMetadata contains wrong zk paths.",zkpath1,ledgerList.get(0).getZkPath());
  }
 finally {
    bkjm.close();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * While boostrapping, in_progress transaction entries should be skipped.
 * Bootstrap usage for BKJM : "-force", "-nonInteractive", "-skipSharedEditsCheck"
 */
@Test public void testBootstrapStandbyWithActiveNN() throws Exception {
cluster.transitionToActive(0);
Configuration confNN1=cluster.getConfiguration(1);
DistributedFileSystem dfs=(DistributedFileSystem)HATestUtil.configureFailoverFs(cluster,confNN1);
// Generate some edits so there is an in-progress segment to bootstrap over.
for (int i=1; i <= 10; i++) {
dfs.mkdirs(new Path("/test" + i));
}
dfs.close();
// Remove NN1's local edit logs so bootstrap must rebuild from NN0's state.
cluster.shutdownNameNode(1);
deleteEditLogIfExists(confNN1);
// Checkpoint the active: enter safe mode, save namespace, leave safe mode.
cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_ENTER,true);
cluster.getNameNodeRpc(0).saveNamespace();
cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_LEAVE,true);
// Without -skipSharedEditsCheck, bootstrap fails with return code 6
// because the shared edits check cannot be satisfied here.
int rc=BootstrapStandby.run(new String[]{"-force","-nonInteractive"},confNN1);
Assert.assertEquals("Mismatches return code",6,rc);
// With -skipSharedEditsCheck the bootstrap succeeds (rc 0).
rc=BootstrapStandby.run(new String[]{"-force","-nonInteractive","-skipSharedEditsCheck"},confNN1);
Assert.assertEquals("Mismatches return code",0,rc);
// Aggressive checkpoint period so the standby checkpoints quickly below.
confNN1.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY,1);
cluster.restartNameNode(1);
cluster.transitionToStandby(1);
NameNode nn0=cluster.getNameNode(0);
HATestUtil.waitForStandbyToCatchUp(nn0,cluster.getNameNode(1));
long expectedCheckpointTxId=NameNodeAdapter.getNamesystem(nn0).getFSImage().getMostRecentCheckpointTxId();
// The bootstrapped standby must checkpoint at the active's latest txid and
// end up with identical on-disk namenode files.
HATestUtil.waitForCheckpoint(cluster,1,ImmutableList.of((int)expectedCheckpointTxId));
FSImageTestUtil.assertNNHasCheckpoints(cluster,1,ImmutableList.of((int)expectedCheckpointTxId));
FSImageTestUtil.assertNNFilesMatch(cluster);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Exercises a mixed sequence of stream operations on the crypto input
 * stream — readAll, seek, skip, positioned read and ByteBuffer read —
 * verifying the data and reported position after each step.
 *
 * Fix: JUnit's assertArrayEquals/assertEquals take (expected, actual);
 * several calls had the arguments reversed, which produces misleading
 * failure messages. The argument order is corrected below; pass/fail
 * behavior is unchanged.
 */
@Test(timeout=120000) public void testCombinedOp() throws Exception {
  OutputStream out=getOutputStream(defaultBufferSize);
  writeData(out);
  final int len1=dataLen / 8;
  final int len2=dataLen / 10;
  InputStream in=getInputStream(defaultBufferSize);
  // Read the first len1 bytes and compare against the source data.
  byte[] readData=new byte[len1];
  readAll(in,readData,0,len1);
  byte[] expectedData=new byte[len1];
  System.arraycopy(data,0,expectedData,0,len1);
  Assert.assertArrayEquals(expectedData,readData);
  long pos=((Seekable)in).getPos();
  Assert.assertEquals(len1,pos);
  // Seek forward len2, then skip another len2.
  ((Seekable)in).seek(pos + len2);
  long n=in.skip(len2);
  Assert.assertEquals(len2,n);
  // Positioned read must not disturb the stream position.
  positionedReadCheck(in,dataLen / 4);
  pos=((Seekable)in).getPos();
  Assert.assertEquals(len1 + len2 + len2,pos);
  // ByteBuffer read of up to len1 bytes from the current position.
  ByteBuffer buf=ByteBuffer.allocate(len1);
  int nRead=((ByteBufferReadable)in).read(buf);
  readData=new byte[nRead];
  buf.rewind();
  buf.get(readData);
  expectedData=new byte[nRead];
  System.arraycopy(data,(int)pos,expectedData,0,nRead);
  Assert.assertArrayEquals(expectedData,readData);
  pos=((Seekable)in).getPos();
  Assert.assertEquals(len1 + 2 * len2 + nRead,pos);
  positionedReadCheck(in,dataLen / 3);
  readData=new byte[len1];
  readAll(in,readData,0,len1);
  expectedData=new byte[len1];
  System.arraycopy(data,(int)pos,expectedData,0,len1);
  Assert.assertArrayEquals(expectedData,readData);
  pos=((Seekable)in).getPos();
  Assert.assertEquals(2 * len1 + 2 * len2 + nRead,pos);
  buf=ByteBuffer.allocate(len1);
  nRead=((ByteBufferReadable)in).read(buf);
  readData=new byte[nRead];
  buf.rewind();
  buf.get(readData);
  expectedData=new byte[nRead];
  System.arraycopy(data,(int)pos,expectedData,0,nRead);
  Assert.assertArrayEquals(expectedData,readData);
  // At EOF a ByteBuffer read must return -1.
  ((Seekable)in).seek(dataLen);
  buf.clear();
  n=((ByteBufferReadable)in).read(buf);
  Assert.assertEquals(-1,n);
  in.close();
}
APIUtilityVerifier EqualityVerifier
/**
 * Test get position: after each read the stream's reported position must
 * equal the total number of bytes consumed so far.
 */
@Test(timeout=120000) public void testGetPos() throws Exception {
  OutputStream out=getOutputStream(defaultBufferSize);
  writeData(out);
  InputStream in=getInputStream(defaultBufferSize);
  byte[] buffer=new byte[dataLen];
  // Read roughly a third; position == bytes read so far.
  int firstRead=readAll(in,buffer,0,dataLen / 3);
  Assert.assertEquals(firstRead,((Seekable)in).getPos());
  // Read the remainder; position == total bytes read.
  int secondRead=readAll(in,buffer,firstRead,dataLen - firstRead);
  Assert.assertEquals(firstRead + secondRead,((Seekable)in).getPos());
  in.close();
}
APIUtilityVerifier EqualityVerifier
/**
 * Verifies that {@code available()} reports the number of bytes remaining
 * after partial reads.
 *
 * Fix: JUnit assertEquals takes (expected, actual); the arguments were
 * reversed, which produces misleading failure messages. Order corrected;
 * pass/fail behavior is unchanged.
 */
@Test(timeout=120000) public void testAvailable() throws Exception {
  OutputStream out=getOutputStream(defaultBufferSize);
  writeData(out);
  InputStream in=getInputStream(defaultBufferSize);
  byte[] result=new byte[dataLen];
  int n1=readAll(in,result,0,dataLen / 3);
  // Remaining bytes after the first partial read.
  Assert.assertEquals(dataLen - n1,in.available());
  int n2=readAll(in,result,n1,dataLen - n1);
  // Remaining bytes after consuming the rest.
  Assert.assertEquals(dataLen - n1 - n2,in.available());
  in.close();
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
/**
 * Test skip: skipping mid-stream positions subsequent reads correctly, a
 * negative skip length is rejected, and skipping at EOF returns 0.
 *
 * Fix: JUnit assert methods take (expected, actual); the reversed argument
 * order on assertArrayEquals/assertEquals is corrected below. Pass/fail
 * behavior is unchanged.
 */
@Test(timeout=120000) public void testSkip() throws Exception {
  OutputStream out=getOutputStream(defaultBufferSize);
  writeData(out);
  InputStream in=getInputStream(defaultBufferSize);
  byte[] result=new byte[dataLen];
  int n1=readAll(in,result,0,dataLen / 3);
  Assert.assertEquals(n1,((Seekable)in).getPos());
  // Skip about a third, then read the remainder of the stream.
  long skipped=in.skip(dataLen / 3);
  int n2=readAll(in,result,0,dataLen);
  Assert.assertEquals(dataLen,n1 + skipped + n2);
  // The bytes read after the skip must match the tail of the source data.
  byte[] readData=new byte[n2];
  System.arraycopy(result,0,readData,0,n2);
  byte[] expectedData=new byte[n2];
  System.arraycopy(data,dataLen - n2,expectedData,0,n2);
  Assert.assertArrayEquals(expectedData,readData);
  try {
    skipped=in.skip(-3);
    Assert.fail("Skip Negative length should fail.");
  }
 catch ( IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("Negative skip length",e);
  }
  // At EOF skip() reports 0 bytes skipped.
  skipped=in.skip(3);
  Assert.assertEquals(0,skipped);
  in.close();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Exercises the HasEnhancedByteBufferAccess read/releaseBuffer path,
 * interleaved with a regular readAll, verifying that the data returned in
 * each mode lines up with the source byte array.
 *
 * Fix: JUnit assertArrayEquals takes (expected, actual); the reversed
 * argument order is corrected. Pass/fail behavior is unchanged.
 */
@Test(timeout=120000) public void testHasEnhancedByteBufferAccess() throws Exception {
  OutputStream out=getOutputStream(defaultBufferSize);
  writeData(out);
  InputStream in=getInputStream(defaultBufferSize);
  final int len1=dataLen / 8;
  // Zero-copy style read of up to len1 bytes from position 0.
  ByteBuffer buffer=((HasEnhancedByteBufferAccess)in).read(getBufferPool(),len1,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
  int n1=buffer.remaining();
  byte[] readData=new byte[n1];
  buffer.get(readData);
  byte[] expectedData=new byte[n1];
  System.arraycopy(data,0,expectedData,0,n1);
  Assert.assertArrayEquals(expectedData,readData);
  // Buffers obtained via read() must be handed back to the pool.
  ((HasEnhancedByteBufferAccess)in).releaseBuffer(buffer);
  // Ordinary read continues from where the buffer read stopped (offset n1).
  readData=new byte[len1];
  readAll(in,readData,0,len1);
  expectedData=new byte[len1];
  System.arraycopy(data,n1,expectedData,0,len1);
  Assert.assertArrayEquals(expectedData,readData);
  // Second buffer read continues from offset n1 + len1.
  buffer=((HasEnhancedByteBufferAccess)in).read(getBufferPool(),len1,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
  int n2=buffer.remaining();
  readData=new byte[n2];
  buffer.get(readData);
  expectedData=new byte[n2];
  System.arraycopy(data,n1 + len1,expectedData,0,n2);
  Assert.assertArrayEquals(expectedData,readData);
  ((HasEnhancedByteBufferAccess)in).releaseBuffer(buffer);
  in.close();
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
/**
 * Test seek to different position: valid seeks succeed, while seeking to a
 * negative offset or past EOF fails and leaves the position unchanged.
 */
@Test(timeout=120000) public void testSeek() throws Exception {
  OutputStream out=getOutputStream(defaultBufferSize);
  writeData(out);
  InputStream in=getInputStream(defaultBufferSize);
  // A few legal seeks: forward, back to the start, and into the middle.
  seekCheck(in,dataLen / 3);
  seekCheck(in,0);
  seekCheck(in,dataLen / 2);
  final long savedPos=((Seekable)in).getPos();
  // Negative offsets are rejected and must not move the position.
  try {
    seekCheck(in,-3);
    Assert.fail("Seek to negative offset should fail.");
  }
 catch ( IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("Cannot seek to negative " + "offset",e);
  }
  Assert.assertEquals(savedPos,((Seekable)in).getPos());
  // Seeking beyond EOF is rejected and must not move the position either.
  try {
    seekCheck(in,dataLen + 3);
    Assert.fail("Seek after EOF should fail.");
  }
 catch ( IOException e) {
    GenericTestUtils.assertExceptionContains("Cannot seek after EOF",e);
  }
  Assert.assertEquals(savedPos,((Seekable)in).getPos());
  in.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests KeyProvider.Metadata construction, serialization round-tripping and
 * version bumping, both without and with a description/attributes map.
 *
 * Fix: restored the stripped generic type on the raw {@code Map}/{@code
 * HashMap} declarations.
 */
@Test public void testMetadata() throws Exception {
  // NOTE(review): "y/m/d" uses 'm' (minute-of-hour), not 'M' (month);
  // both sides of the comparison use the same parsed Date, so the test is
  // self-consistent — left as-is to preserve behavior.
  DateFormat format=new SimpleDateFormat("y/m/d");
  Date date=format.parse("2013/12/25");
  KeyProvider.Metadata meta=new KeyProvider.Metadata("myCipher",100,null,null,date,123);
  assertEquals("myCipher",meta.getCipher());
  assertEquals(100,meta.getBitLength());
  assertNull(meta.getDescription());
  assertEquals(date,meta.getCreated());
  assertEquals(123,meta.getVersions());
  // Round-trip through serialize(); all fields must survive, with a null
  // attributes map deserializing as an empty map.
  KeyProvider.Metadata second=new KeyProvider.Metadata(meta.serialize());
  assertEquals(meta.getCipher(),second.getCipher());
  assertEquals(meta.getBitLength(),second.getBitLength());
  assertNull(second.getDescription());
  assertTrue(second.getAttributes().isEmpty());
  assertEquals(meta.getCreated(),second.getCreated());
  assertEquals(meta.getVersions(),second.getVersions());
  // addVersion returns the pre-increment count and only mutates the copy.
  int newVersion=second.addVersion();
  assertEquals(123,newVersion);
  assertEquals(124,second.getVersions());
  assertEquals(123,meta.getVersions());
  // Repeat with a description and a non-empty attributes map.
  format=new SimpleDateFormat("y/m/d");
  date=format.parse("2013/12/25");
  Map<String,String> attributes=new HashMap<String,String>();
  attributes.put("a","A");
  meta=new KeyProvider.Metadata("myCipher",100,"description",attributes,date,123);
  assertEquals("myCipher",meta.getCipher());
  assertEquals(100,meta.getBitLength());
  assertEquals("description",meta.getDescription());
  assertEquals(attributes,meta.getAttributes());
  assertEquals(date,meta.getCreated());
  assertEquals(123,meta.getVersions());
  second=new KeyProvider.Metadata(meta.serialize());
  assertEquals(meta.getCipher(),second.getCipher());
  assertEquals(meta.getBitLength(),second.getBitLength());
  assertEquals(meta.getDescription(),second.getDescription());
  assertEquals(meta.getAttributes(),second.getAttributes());
  assertEquals(meta.getCreated(),second.getCreated());
  assertEquals(meta.getVersions(),second.getVersions());
  newVersion=second.addVersion();
  assertEquals(123,newVersion);
  assertEquals(124,second.getVersions());
  assertEquals(123,meta.getVersions());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests KeyProvider.Options: defaults picked up from a Configuration,
 * setter overrides, and library defaults with an empty Configuration.
 *
 * Fix: restored the stripped generic type on the raw {@code Map}/{@code
 * HashMap} declarations.
 */
@Test public void testOptions() throws Exception {
  Configuration conf=new Configuration();
  conf.set(KeyProvider.DEFAULT_CIPHER_NAME,"myCipher");
  conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME,512);
  Map<String,String> attributes=new HashMap<String,String>();
  attributes.put("a","A");
  // Options built from conf reflect the configured cipher and bit length.
  KeyProvider.Options options=KeyProvider.options(conf);
  assertEquals("myCipher",options.getCipher());
  assertEquals(512,options.getBitLength());
  // Setters override the configured values.
  options.setCipher("yourCipher");
  options.setDescription("description");
  options.setAttributes(attributes);
  options.setBitLength(128);
  assertEquals("yourCipher",options.getCipher());
  assertEquals(128,options.getBitLength());
  assertEquals("description",options.getDescription());
  assertEquals(attributes,options.getAttributes());
  // An empty Configuration yields the library defaults.
  options=KeyProvider.options(new Configuration());
  assertEquals(KeyProvider.DEFAULT_CIPHER,options.getCipher());
  assertEquals(KeyProvider.DEFAULT_BITLENGTH,options.getBitLength());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Generates an encrypted key via the crypto extension, decrypts it two
 * ways — manually with AES/CTR using the derived IV, and via
 * decryptEncryptedKey — and asserts both produce identical key material.
 */
@Test public void testEncryptDecrypt() throws Exception {
  KeyProviderCryptoExtension.EncryptedKeyVersion eek=kpExt.generateEncryptedKey(encryptionKey.getName());
  final byte[] iv=eek.getEncryptedKeyIv();
  final byte[] ciphertext=eek.getEncryptedKeyVersion().getMaterial();
  // Manual decryption: AES/CTR with the encryption key and the derived IV.
  Cipher cipher=Cipher.getInstance("AES/CTR/NoPadding");
  cipher.init(Cipher.DECRYPT_MODE,new SecretKeySpec(encryptionKey.getMaterial(),"AES"),new IvParameterSpec(KeyProviderCryptoExtension.EncryptedKeyVersion.deriveIV(iv)));
  final byte[] manualMaterial=cipher.doFinal(ciphertext);
  // API decryption of the same encrypted key version.
  EncryptedKeyVersion rebuilt=EncryptedKeyVersion.createForDecryption(eek.getEncryptionKeyVersionName(),eek.getEncryptedKeyIv(),eek.getEncryptedKeyVersion().getMaterial());
  KeyVersion decrypted=kpExt.decryptEncryptedKey(rebuilt);
  assertArrayEquals("Wrong key material from decryptEncryptedKey",manualMaterial,decrypted.getMaterial());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testCreateExtension() throws Exception {
Configuration conf=new Configuration();
Credentials credentials=new Credentials();
KeyProvider kp=new UserProvider.Factory().createProvider(new URI("user:///"),conf);
KeyProviderDelegationTokenExtension kpDTE1=KeyProviderDelegationTokenExtension.createKeyProviderDelegationTokenExtension(kp);
Assert.assertNotNull(kpDTE1);
Assert.assertNull(kpDTE1.addDelegationTokens("user",credentials));
MockKeyProvider mock=mock(MockKeyProvider.class);
when(mock.addDelegationTokens("renewer",credentials)).thenReturn(new Token>[]{new Token(null,null,new Text("kind"),new Text("service"))});
KeyProviderDelegationTokenExtension kpDTE2=KeyProviderDelegationTokenExtension.createKeyProviderDelegationTokenExtension(mock);
Token>[] tokens=kpDTE2.addDelegationTokens("renewer",credentials);
Assert.assertNotNull(tokens);
Assert.assertEquals("kind",tokens[0].getKind().toString());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// End-to-end test of the JavaKeyStoreProvider: basic provider behavior and
// permissions, then the keystore recovery state machine across the
// _OLD / current / _NEW file combinations used during atomic flush.
@Test public void testJksProvider() throws Exception {
Configuration conf=new Configuration();
final String ourUrl=JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir+ "/test.jks";
File file=new File(tmpDir,"test.jks");
file.delete();
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,ourUrl);
checkSpecificProvider(conf,ourUrl);
Path path=ProviderUtils.unnestUri(new URI(ourUrl));
FileSystem fs=path.getFileSystem(conf);
FileStatus s=fs.getFileStatus(path);
// The keystore must be created owner-only (rwx------).
assertTrue(s.getPermission().toString().equals("rwx------"));
assertTrue(file + " should exist",file.isFile());
// Case 1: valid _OLD file plus an empty current file -> the provider
// should recover from _OLD and then delete it.
File oldFile=new File(file.getPath() + "_OLD");
file.renameTo(oldFile);
file.delete();
file.createNewFile();
assertTrue(oldFile.exists());
KeyProvider provider=KeyProviderFactory.getProviders(conf).get(0);
assertTrue(file.exists());
assertTrue(oldFile + "should be deleted",!oldFile.exists());
verifyAfterReload(file,provider);
assertTrue(!oldFile.exists());
// Case 2: both _NEW and current exist -> ambiguous state, load must fail.
File newFile=new File(file.getPath() + "_NEW");
newFile.createNewFile();
try {
provider=KeyProviderFactory.getProviders(conf).get(0);
Assert.fail("_NEW and current file should not exist together !!");
}
 catch ( Exception e) {
}
 finally {
if (newFile.exists()) {
newFile.delete();
}
}
// Case 3: only _NEW exists -> the provider should load from _NEW and
// promote it (neither _NEW nor _OLD remains afterwards).
file.renameTo(newFile);
file.delete();
try {
provider=KeyProviderFactory.getProviders(conf).get(0);
Assert.assertFalse(newFile.exists());
Assert.assertFalse(oldFile.exists());
}
 catch ( Exception e) {
Assert.fail("JKS should load from _NEW file !!");
}
verifyAfterReload(file,provider);
// Case 4: only _OLD exists (with an empty _NEW leftover) -> the provider
// should load from _OLD.
newFile.createNewFile();
file.renameTo(oldFile);
file.delete();
try {
provider=KeyProviderFactory.getProviders(conf).get(0);
Assert.assertFalse(newFile.exists());
Assert.assertFalse(oldFile.exists());
}
 catch ( Exception e) {
Assert.fail("JKS should load from _OLD file !!");
}
 finally {
if (newFile.exists()) {
newFile.delete();
}
}
verifyAfterReload(file,provider);
// Finally: widen the permissions and verify the provider retains them.
fs.setPermission(path,new FsPermission("777"));
checkPermissionRetention(conf,ourUrl,path);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests KeyProviderFactory with a comma-separated provider path: both a
 * UserProvider and a JavaKeyStoreProvider must be instantiated, in order,
 * and report their configured URIs via toString().
 *
 * Fix: restored the stripped generic type on the raw {@code List}
 * declaration.
 */
@Test public void testFactory() throws Exception {
  Configuration conf=new Configuration();
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,UserProvider.SCHEME_NAME + ":///," + JavaKeyStoreProvider.SCHEME_NAME+ "://file"+ tmpDir+ "/test.jks");
  List<KeyProvider> providers=KeyProviderFactory.getProviders(conf);
  assertEquals(2,providers.size());
  assertEquals(UserProvider.class,providers.get(0).getClass());
  assertEquals(JavaKeyStoreProvider.class,providers.get(1).getClass());
  assertEquals(UserProvider.SCHEME_NAME + ":///",providers.get(0).toString());
  assertEquals(JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir+ "/test.jks",providers.get(1).toString());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier
// Verifies keystore password handling via configuration: creating and
// reopening a keystore with a valid password file works, while a missing
// file, a wrong file, and no password source at all must each fail.
@Test public void testJksProviderPasswordViaConfig() throws Exception {
Configuration conf=new Configuration();
final String ourUrl=JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir+ "/test.jks";
File file=new File(tmpDir,"test.jks");
file.delete();
try {
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,ourUrl);
// Valid classpath password file: keystore creation should succeed.
conf.set(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY,"javakeystoreprovider.password");
KeyProvider provider=KeyProviderFactory.getProviders(conf).get(0);
provider.createKey("key3",new byte[16],KeyProvider.options(conf));
provider.flush();
}
 catch ( Exception ex) {
Assert.fail("could not create keystore with password file");
}
// Reopen with the same password and confirm the key is readable.
KeyProvider provider=KeyProviderFactory.getProviders(conf).get(0);
Assert.assertNotNull(provider.getCurrentKey("key3"));
try {
// Nonexistent password file: provider construction must fail.
conf.set(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY,"bar");
KeyProviderFactory.getProviders(conf).get(0);
Assert.fail("using non existing password file, it should fail");
}
 catch ( IOException ex) {
// expected: password file not found
}
try {
// Existing file with the wrong password contents: must also fail.
conf.set(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY,"core-site.xml");
KeyProviderFactory.getProviders(conf).get(0);
Assert.fail("using different password file, it should fail");
}
 catch ( IOException ex) {
// expected: keystore cannot be opened with the wrong password
}
try {
// No password file property and no env fallback: must fail too.
conf.unset(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY);
KeyProviderFactory.getProviders(conf).get(0);
Assert.fail("No password file property, env not set, it should fail");
}
 catch ( IOException ex) {
// expected: no password source available
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Walks a key through its full KeyShell lifecycle — create, list (terse
 * and verbose), roll, delete — checking the return code and the console
 * output at each step.
 */
@Test public void testKeySuccessfulKeyLifecycle() throws Exception {
  String keyName="key1";
  KeyShell shell=new KeyShell();
  shell.setConf(new Configuration());
  // Create the key and confirm the success message.
  outContent.reset();
  int rc=shell.run(new String[]{"create",keyName,"-provider",jceksProvider});
  assertEquals(0,rc);
  assertTrue(outContent.toString().contains(keyName + " has been " + "successfully created"));
  // Terse listing shows the key name; verbose adds metadata fields.
  String listOut=listKeys(shell,false);
  assertTrue(listOut.contains(keyName));
  listOut=listKeys(shell,true);
  assertTrue(listOut.contains(keyName));
  assertTrue(listOut.contains("description"));
  assertTrue(listOut.contains("created"));
  // Roll the key to a new version.
  outContent.reset();
  rc=shell.run(new String[]{"roll",keyName,"-provider",jceksProvider});
  assertEquals(0,rc);
  assertTrue(outContent.toString().contains("key1 has been successfully " + "rolled."));
  // After deletion the key no longer appears in the listing.
  deleteKey(shell,keyName);
  listOut=listKeys(shell,false);
  assertFalse(listOut,listOut.contains(keyName));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Exercises the "-attr" option of KeyShell create: valid name=value pairs
// succeed (including values containing '='), malformed specs fail with
// rc 1, whitespace is trimmed, multiple attributes are accepted, and a
// duplicated attribute name is rejected.
@Test public void testAttributes() throws Exception {
int rc;
KeyShell ks=new KeyShell();
ks.setConf(new Configuration());
// Valid single attribute.
final String[] args1={"create","keyattr1","-provider",jceksProvider,"-attr","foo=bar"};
rc=ks.run(args1);
assertEquals(0,rc);
assertTrue(outContent.toString().contains("keyattr1 has been " + "successfully created"));
String listOut=listKeys(ks,true);
assertTrue(listOut.contains("keyattr1"));
assertTrue(listOut.contains("attributes: [foo=bar]"));
// Malformed attr: empty name ("=bar") must be rejected.
outContent.reset();
final String[] args2={"create","keyattr2","-provider",jceksProvider,"-attr","=bar"};
rc=ks.run(args2);
assertEquals(1,rc);
// Malformed attr: no '=' at all ("foo"). args2[5] is the attr spec slot,
// rewritten in place for each variation below.
outContent.reset();
args2[5]="foo";
rc=ks.run(args2);
assertEquals(1,rc);
// Malformed attr: bare "=".
outContent.reset();
args2[5]="=";
rc=ks.run(args2);
assertEquals(1,rc);
// Valid: value may itself contain '=' ("a=b=c" -> name "a", value "b=c").
outContent.reset();
args2[5]="a=b=c";
rc=ks.run(args2);
assertEquals(0,rc);
listOut=listKeys(ks,true);
assertTrue(listOut.contains("keyattr2"));
assertTrue(listOut.contains("attributes: [a=b=c]"));
// Multiple attributes; surrounding whitespace is trimmed from names/values.
outContent.reset();
final String[] args3={"create","keyattr3","-provider",jceksProvider,"-attr","foo = bar","-attr"," glarch =baz ","-attr","abc=def"};
rc=ks.run(args3);
assertEquals(0,rc);
listOut=listKeys(ks,true);
assertTrue(listOut.contains("keyattr3"));
assertTrue(listOut.contains("[foo=bar]"));
assertTrue(listOut.contains("[glarch=baz]"));
assertTrue(listOut.contains("[abc=def]"));
// Duplicate attribute names in one create must be rejected.
outContent.reset();
final String[] args4={"create","keyattr4","-provider",jceksProvider,"-attr","foo=bar","-attr","foo=glarch"};
rc=ks.run(args4);
assertEquals(1,rc);
// Clean up the successfully created keys.
deleteKey(ks,"keyattr1");
deleteKey(ks,"keyattr2");
deleteKey(ks,"keyattr3");
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creates a key with a "-description" option and verifies both the
 * success message and that the description shows up in a verbose listing.
 */
@Test public void testKeySuccessfulCreationWithDescription() throws Exception {
  outContent.reset();
  KeyShell shell=new KeyShell();
  shell.setConf(new Configuration());
  int rc=shell.run(new String[]{"create","key1","-provider",jceksProvider,"-description","someDescription"});
  assertEquals(0,rc);
  assertTrue(outContent.toString().contains("key1 has been successfully " + "created"));
  // Verbose listing must include the description field and its value.
  String listOut=listKeys(shell,true);
  assertTrue(listOut.contains("description"));
  assertTrue(listOut.contains("someDescription"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end exercise of the KMS client provider against an in-process KMS server:
 * key creation with options, version/metadata retrieval, key rolling, encrypted-key
 * generation/decryption, deletion, description/attribute metadata, and delegation tokens.
 * NOTE(review): each phase depends on state left by the previous one — do not reorder.
 */
@Test public void testKMSProvider() throws Exception {
// Configure Kerberos auth and stand up a KMS server backed by a temp conf dir.
Configuration conf=new Configuration();
conf.set("hadoop.security.authentication","kerberos");
UserGroupInformation.setConfiguration(conf);
File confDir=getTestDir();
conf=createBaseKMSConf(confDir);
writeConf(confDir,conf);
runServer(null,null,confDir,new KMSCallable(){
@Override public Void call() throws Exception {
// 'started' anchors the created-timestamp assertions below.
Date started=new Date();
Configuration conf=new Configuration();
URI uri=createKMSUri(getKMSUrl());
KeyProvider kp=new KMSClientProvider(uri,conf);
// Fresh server: no keys, no metadata.
Assert.assertTrue(kp.getKeys().isEmpty());
Assert.assertEquals(0,kp.getKeysMetadata().length);
// Create "k1" with explicit cipher, bit length and description.
KeyProvider.Options options=new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setDescription("l1");
KeyProvider.KeyVersion kv0=kp.createKey("k1",options);
Assert.assertNotNull(kv0);
Assert.assertNotNull(kv0.getVersionName());
Assert.assertNotNull(kv0.getMaterial());
// The stored version must round-trip by its version name.
KeyProvider.KeyVersion kv1=kp.getKeyVersion(kv0.getVersionName());
Assert.assertEquals(kv0.getVersionName(),kv1.getVersionName());
Assert.assertNotNull(kv1.getMaterial());
// The current key is still the first (and only) version.
KeyProvider.KeyVersion cv1=kp.getCurrentKey("k1");
Assert.assertEquals(kv0.getVersionName(),cv1.getVersionName());
Assert.assertNotNull(cv1.getMaterial());
// Metadata reflects the creation options, one version, and a plausible timestamp.
KeyProvider.Metadata m1=kp.getMetadata("k1");
Assert.assertEquals("AES/CTR/NoPadding",m1.getCipher());
Assert.assertEquals("AES",m1.getAlgorithm());
Assert.assertEquals(128,m1.getBitLength());
Assert.assertEquals(1,m1.getVersions());
Assert.assertNotNull(m1.getCreated());
Assert.assertTrue(started.before(m1.getCreated()));
List lkv1=kp.getKeyVersions("k1");
Assert.assertEquals(1,lkv1.size());
Assert.assertEquals(kv0.getVersionName(),lkv1.get(0).getVersionName());
Assert.assertNotNull(kv1.getMaterial());
// Roll to a new version: new name and material differing from the first version.
KeyProvider.KeyVersion kv2=kp.rollNewVersion("k1");
Assert.assertNotSame(kv0.getVersionName(),kv2.getVersionName());
Assert.assertNotNull(kv2.getMaterial());
kv2=kp.getKeyVersion(kv2.getVersionName());
// Byte-wise comparison: rolled material must not equal the original material.
boolean eq=true;
for (int i=0; i < kv1.getMaterial().length; i++) {
eq=eq && kv1.getMaterial()[i] == kv2.getMaterial()[i];
}
Assert.assertFalse(eq);
// After the roll, the current key is the new version (materials byte-identical).
KeyProvider.KeyVersion cv2=kp.getCurrentKey("k1");
Assert.assertEquals(kv2.getVersionName(),cv2.getVersionName());
Assert.assertNotNull(cv2.getMaterial());
eq=true;
for (int i=0; i < kv1.getMaterial().length; i++) {
eq=eq && cv2.getMaterial()[i] == kv2.getMaterial()[i];
}
Assert.assertTrue(eq);
// Version list now has both versions in creation order.
List lkv2=kp.getKeyVersions("k1");
Assert.assertEquals(2,lkv2.size());
Assert.assertEquals(kv1.getVersionName(),lkv2.get(0).getVersionName());
Assert.assertNotNull(lkv2.get(0).getMaterial());
Assert.assertEquals(kv2.getVersionName(),lkv2.get(1).getVersionName());
Assert.assertNotNull(lkv2.get(1).getMaterial());
// Metadata version count is bumped to 2; other fields unchanged.
KeyProvider.Metadata m2=kp.getMetadata("k1");
Assert.assertEquals("AES/CTR/NoPadding",m2.getCipher());
Assert.assertEquals("AES",m2.getAlgorithm());
Assert.assertEquals(128,m2.getBitLength());
Assert.assertEquals(2,m2.getVersions());
Assert.assertNotNull(m2.getCreated());
Assert.assertTrue(started.before(m2.getCreated()));
List ks1=kp.getKeys();
Assert.assertEquals(1,ks1.size());
Assert.assertEquals("k1",ks1.get(0));
KeyProvider.Metadata[] kms1=kp.getKeysMetadata("k1");
Assert.assertEquals(1,kms1.length);
Assert.assertEquals("AES/CTR/NoPadding",kms1[0].getCipher());
Assert.assertEquals("AES",kms1[0].getAlgorithm());
Assert.assertEquals(128,kms1[0].getBitLength());
Assert.assertEquals(2,kms1[0].getVersions());
Assert.assertNotNull(kms1[0].getCreated());
Assert.assertTrue(started.before(kms1[0].getCreated()));
// Crypto extension: generate an encrypted key and decrypt it back.
KeyProvider.KeyVersion kv=kp.getCurrentKey("k1");
KeyProviderCryptoExtension kpExt=KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
EncryptedKeyVersion ek1=kpExt.generateEncryptedKey(kv.getName());
Assert.assertEquals(KeyProviderCryptoExtension.EEK,ek1.getEncryptedKeyVersion().getVersionName());
Assert.assertNotNull(ek1.getEncryptedKeyVersion().getMaterial());
Assert.assertEquals(kv.getMaterial().length,ek1.getEncryptedKeyVersion().getMaterial().length);
KeyProvider.KeyVersion k1=kpExt.decryptEncryptedKey(ek1);
Assert.assertEquals(KeyProviderCryptoExtension.EK,k1.getVersionName());
// Decryption is deterministic: decrypting the same EEK twice yields identical material.
KeyProvider.KeyVersion k1a=kpExt.decryptEncryptedKey(ek1);
Assert.assertArrayEquals(k1.getMaterial(),k1a.getMaterial());
Assert.assertEquals(kv.getMaterial().length,k1.getMaterial().length);
// A second generated EEK must decrypt to different material than the first.
EncryptedKeyVersion ek2=kpExt.generateEncryptedKey(kv.getName());
KeyProvider.KeyVersion k2=kpExt.decryptEncryptedKey(ek2);
boolean isEq=true;
for (int i=0; isEq && i < ek2.getEncryptedKeyVersion().getMaterial().length; i++) {
isEq=k2.getMaterial()[i] == k1.getMaterial()[i];
}
Assert.assertFalse(isEq);
// Deleting "k1" removes versions, metadata, and the key listing entry.
kp.deleteKey("k1");
Assert.assertNull(kp.getKeyVersion("k1"));
Assert.assertNull(kp.getKeyVersions("k1"));
Assert.assertNull(kp.getMetadata("k1"));
Assert.assertTrue(kp.getKeys().isEmpty());
Assert.assertEquals(0,kp.getKeysMetadata().length);
// k2: no description, no attributes.
options=new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
kp.createKey("k2",options);
KeyProvider.Metadata meta=kp.getMetadata("k2");
Assert.assertNull(meta.getDescription());
Assert.assertTrue(meta.getAttributes().isEmpty());
// k3: description only.
options=new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setDescription("d");
kp.createKey("k3",options);
meta=kp.getMetadata("k3");
Assert.assertEquals("d",meta.getDescription());
Assert.assertTrue(meta.getAttributes().isEmpty());
// k4: attributes only.
Map attributes=new HashMap();
attributes.put("a","A");
options=new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setAttributes(attributes);
kp.createKey("k4",options);
meta=kp.getMetadata("k4");
Assert.assertNull(meta.getDescription());
Assert.assertEquals(attributes,meta.getAttributes());
// k5: both description and attributes.
options=new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setDescription("d");
options.setAttributes(attributes);
kp.createKey("k5",options);
meta=kp.getMetadata("k5");
Assert.assertEquals("d",meta.getDescription());
Assert.assertEquals(attributes,meta.getAttributes());
// Delegation-token extension: exactly one token of kind "kms-dt" for the KMS address.
KeyProviderDelegationTokenExtension kpdte=KeyProviderDelegationTokenExtension.createKeyProviderDelegationTokenExtension(kp);
Credentials credentials=new Credentials();
kpdte.addDelegationTokens("foo",credentials);
Assert.assertEquals(1,credentials.getAllTokens().size());
InetSocketAddress kmsAddr=new InetSocketAddress(getKMSUrl().getHost(),getKMSUrl().getPort());
Assert.assertEquals(new Text("kms-dt"),credentials.getToken(SecurityUtil.buildTokenService(kmsAddr)).getKind());
return null;
}
}
);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies FileContext per-scheme statistics: reads are zero after a pure write,
 * written bytes are tracked, and read bytes accumulate from both sequential and
 * positional reads. Fix: the input stream was previously never closed (leaked).
 */
@Test
public void testStatistics() throws IOException, URISyntaxException {
  URI fsUri = getFsUri();
  Statistics stats = FileContext.getStatistics(fsUri);
  Assert.assertEquals(0, stats.getBytesRead());
  Path filePath = fileContextTestHelper.getTestRootPath(fc, "file1");
  createFile(fc, filePath, numBlocks, blockSize);
  // Writing alone must not register any read bytes.
  Assert.assertEquals(0, stats.getBytesRead());
  verifyWrittenBytes(stats);
  FSDataInputStream fstr = fc.open(filePath);
  try {
    byte[] buf = new byte[blockSize];
    int bytesRead = fstr.read(buf, 0, blockSize);
    // Positional read; its bytes are also folded into the statistics.
    fstr.read(0, buf, 0, blockSize);
    Assert.assertEquals(blockSize, bytesRead);
  } finally {
    fstr.close(); // fix: stream was leaked if an assertion above failed
  }
  verifyReadBytes(stats);
  verifyWrittenBytes(stats);
  // The same Statistics object must be returned for an equivalent URI.
  verifyReadBytes(FileContext.getStatistics(getFsUri()));
  Map statsMap = FileContext.getAllStatistics();
  URI exactUri = getSchemeAuthorityUri();
  verifyWrittenBytes(statsMap.get(exactUri));
  fc.delete(filePath, true);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** Listing "test" with the X filter yields nothing: the ax* dirs live a level deeper. */
@Test
public void testListStatusFilterWithNoMatches() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA2),
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  final FileStatus[] matches = fSys.listStatus(getTestRootPath(fSys, "test"), TEST_X_FILTER);
  Assert.assertEquals(0, matches.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Glob "test/hadoop/a??" plus the X filter keeps exactly the two ax* directories. */
@Test
public void testGlobStatusFilterWithMultiplePathWildcardsAndNonTrivialFilter() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AXX)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  final FileStatus[] matches =
      fSys.globStatus(getTestRootPath(fSys, "test/hadoop/a??"), TEST_X_FILTER);
  Assert.assertEquals(2, matches.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), matches));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Glob "test/hadoop*" matches the two directory roots, not their children. */
@Test
public void testGlobStatusSomeMatchesInDirectories() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AAA2)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  final FileStatus[] matches = fSys.globStatus(getTestRootPath(fSys, "test/hadoop*"));
  Assert.assertEquals(2, matches.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop"), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop2"), matches));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * create() without overwrite must fail on an existing file; create() with
 * overwrite=true must replace it. Fix: the overwrite output stream is now
 * closed in a finally block so it cannot leak if write() throws.
 */
@Test
public void testOverwrite() throws IOException {
  Path path = getTestRootPath(fSys, "test/hadoop/file");
  fSys.mkdirs(path.getParent());
  createFile(path);
  Assert.assertTrue("Exists", exists(fSys, path));
  Assert.assertEquals("Length", data.length, fSys.getFileStatus(path).getLen());
  try {
    createFile(path);
    Assert.fail("Should throw IOException.");
  } catch (IOException e) {
    // expected: non-overwriting create on an existing file must fail
  }
  FSDataOutputStream out = fSys.create(path, true, 4096);
  try {
    out.write(data, 0, data.length);
  } finally {
    out.close(); // fix: previously leaked if write() threw
  }
  Assert.assertTrue("Exists", exists(fSys, path));
  Assert.assertEquals("Length", data.length, fSys.getFileStatus(path).getLen());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/** mkdirs under an existing file must throw and create nothing, at any depth. */
@Test
public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
  final Path dir = getTestRootPath(fSys, "test/hadoop");
  Assert.assertFalse(exists(fSys, dir));
  fSys.mkdirs(dir);
  Assert.assertTrue(exists(fSys, dir));
  createFile(getTestRootPath(fSys, "test/hadoop/file"));
  final Path subDir = getTestRootPath(fSys, "test/hadoop/file/subdir");
  try {
    fSys.mkdirs(subDir);
    Assert.fail("Should throw IOException.");
  } catch (IOException expected) {
    // a file cannot have a child directory
  }
  Assert.assertFalse(exists(fSys, subDir));
  final Path deepSubDir = getTestRootPath(fSys, "test/hadoop/file/deep/sub/dir");
  try {
    fSys.mkdirs(deepSubDir);
    Assert.fail("Should throw IOException.");
  } catch (IOException expected) {
    // nor a deeper descendant
  }
  Assert.assertFalse(exists(fSys, deepSubDir));
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * getWrappedStream() on an open FSDataInputStream must return a non-null stream.
 * Fix: the stream is now closed in a finally block so it cannot leak if
 * getWrappedStream() or the assertion throws.
 */
@Test
public void testGetWrappedInputStream() throws IOException {
  Path src = getTestRootPath(fSys, "test/hadoop/file");
  createFile(src);
  FSDataInputStream in = fSys.open(src);
  try {
    InputStream is = in.getWrappedStream();
    Assert.assertNotNull(is);
  } finally {
    in.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises setWorkingDirectory/getWorkingDirectory with absolute and relative
 * paths, and verifies relative file operations resolve against the working dir.
 * NOTE(review): every assertion depends on the cwd set by the preceding call —
 * statement order is load-bearing.
 */
@Test public void testWorkingDirectory() throws Exception {
Path workDir=new Path(getAbsoluteTestRootPath(fSys),new Path("test"));
fSys.setWorkingDirectory(workDir);
Assert.assertEquals(workDir,fSys.getWorkingDirectory());
// "." resolves to the current working dir, so it stays unchanged.
fSys.setWorkingDirectory(new Path("."));
Assert.assertEquals(workDir,fSys.getWorkingDirectory());
// ".." moves the working dir up one level.
fSys.setWorkingDirectory(new Path(".."));
Assert.assertEquals(workDir.getParent(),fSys.getWorkingDirectory());
workDir=new Path(getAbsoluteTestRootPath(fSys),new Path("test"));
fSys.setWorkingDirectory(workDir);
Assert.assertEquals(workDir,fSys.getWorkingDirectory());
// A relative working-dir argument resolves against the current working dir.
Path relativeDir=new Path("existingDir1");
Path absoluteDir=new Path(workDir,"existingDir1");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(relativeDir);
Assert.assertEquals(absoluteDir,fSys.getWorkingDirectory());
absoluteDir=getTestRootPath(fSys,"test/existingDir2");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir,fSys.getWorkingDirectory());
// Relative open/mkdirs must resolve against the working dir set above.
Path absolutePath=new Path(absoluteDir,"foo");
createFile(fSys,absolutePath);
fSys.open(new Path("foo")).close();
fSys.mkdirs(new Path("newDir"));
Assert.assertTrue(isDir(fSys,new Path(absoluteDir,"newDir")));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** Glob "test/hadoop/?" matches no single-character names, even with the trivial filter. */
@Test
public void testGlobStatusFilterWithEmptyPathResults() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AXX)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  final FileStatus[] matches =
      fSys.globStatus(getTestRootPath(fSys, "test/hadoop/?"), DEFAULT_FILTER);
  Assert.assertEquals(0, matches.length);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** Glob "test/hadoop/?" with the X filter: no single-character names exist, so no matches. */
@Test
public void testGlobStatusFilterWithNoMatchingPathsAndNonTrivialFilter() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AXX)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  final FileStatus[] matches =
      fSys.globStatus(getTestRootPath(fSys, "test/hadoop/?"), TEST_X_FILTER);
  Assert.assertEquals(0, matches.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Glob "test/hadoop/a??" with the trivial filter returns all three distinct a?? dirs. */
@Test
public void testGlobStatusFilterWithMultipleWildCardMatchesAndTrivialFilter() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AXX)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  final FileStatus[] matches =
      fSys.globStatus(getTestRootPath(fSys, "test/hadoop/a??"), DEFAULT_FILTER);
  Assert.assertEquals(3, matches.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AAA), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), matches));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/** A non-glob missing path yields null; glob patterns under a missing dir yield empty arrays. */
@Test
public void testGlobStatusNonExistentFile() throws Exception {
  FileStatus[] matches = fSys.globStatus(getTestRootPath(fSys, "test/hadoopfsdf"));
  Assert.assertNull(matches);
  matches = fSys.globStatus(getTestRootPath(fSys, "test/hadoopfsdf/?"));
  Assert.assertEquals(0, matches.length);
  matches = fSys.globStatus(getTestRootPath(fSys, "test/hadoopfsdf/xyz*/?"));
  Assert.assertEquals(0, matches.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/** A non-recursive delete succeeds on an empty directory and removes it. */
@Test
public void testDeleteEmptyDirectory() throws IOException {
  final Path emptyDir = getTestRootPath(fSys, "test/hadoop");
  fSys.mkdirs(emptyDir);
  Assert.assertTrue("Dir exists", exists(fSys, emptyDir));
  Assert.assertTrue("Deleted", fSys.delete(emptyDir, false));
  Assert.assertFalse("Dir doesn't exist", exists(fSys, emptyDir));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** Glob "test/hadoop/?" finds nothing: no child has a single-character name. */
@Test
public void testGlobStatusWithNoMatchesInPath() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AAA2)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  final FileStatus[] matches = fSys.globStatus(getTestRootPath(fSys, "test/hadoop/?"));
  Assert.assertEquals(0, matches.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Creating a file in a missing directory implicitly creates the parent. */
@Test
public void testWriteInNonExistentDirectory() throws IOException {
  final Path file = getTestRootPath(fSys, "test/hadoop/file");
  Assert.assertFalse("Parent doesn't exist", exists(fSys, file.getParent()));
  createFile(file);
  Assert.assertTrue("Exists", exists(fSys, file));
  Assert.assertEquals("Length", data.length, fSys.getFileStatus(file).getLen());
  Assert.assertTrue("Parent exists", exists(fSys, file.getParent()));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/** Non-recursive delete of a non-empty dir must fail; recursive delete removes everything. */
@Test
public void testDeleteRecursively() throws IOException {
  final Path dir = getTestRootPath(fSys, "test/hadoop");
  final Path file = getTestRootPath(fSys, "test/hadoop/file");
  final Path subDir = getTestRootPath(fSys, "test/hadoop/subdir");
  createFile(file);
  fSys.mkdirs(subDir);
  Assert.assertTrue("File exists", exists(fSys, file));
  Assert.assertTrue("Dir exists", exists(fSys, dir));
  Assert.assertTrue("Subdir exists", exists(fSys, subDir));
  try {
    fSys.delete(dir, false);
    Assert.fail("Should throw IOException.");
  } catch (IOException expected) {
    // non-recursive delete of a non-empty directory must fail
  }
  Assert.assertTrue("File still exists", exists(fSys, file));
  Assert.assertTrue("Dir still exists", exists(fSys, dir));
  Assert.assertTrue("Subdir still exists", exists(fSys, subDir));
  Assert.assertTrue("Deleted", fSys.delete(dir, true));
  Assert.assertFalse("File doesn't exist", exists(fSys, file));
  Assert.assertFalse("Dir doesn't exist", exists(fSys, dir));
  Assert.assertFalse("Subdir doesn't exist", exists(fSys, subDir));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Glob "test/hadoop/*" with the X filter keeps only the two ax* directories. */
@Test
public void testGlobStatusFilterWithMultiplePathMatchesAndNonTrivialFilter() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AXX)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  final FileStatus[] matches =
      fSys.globStatus(getTestRootPath(fSys, "test/hadoop/*"), TEST_X_FILTER);
  Assert.assertEquals(2, matches.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), matches));
}
APIUtilityVerifier BooleanVerifier
/** Deleting a path that does not exist returns false rather than throwing. */
@Test
public void testDeleteNonExistentFile() throws IOException {
  final Path missing = getTestRootPath(fSys, "test/hadoop/file");
  Assert.assertFalse("Doesn't exist", exists(fSys, missing));
  Assert.assertFalse("No deletion", fSys.delete(missing, true));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** listStatus: one entry under "test", three under "test/hadoop", none under an empty dir. */
@Test
public void testListStatus() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, "test/hadoop/a"),
      getTestRootPath(fSys, "test/hadoop/b"),
      getTestRootPath(fSys, "test/hadoop/c/1")};
  Assert.assertFalse(exists(fSys, dirs[0]));
  for (Path dir : dirs) {
    fSys.mkdirs(dir);
  }
  FileStatus[] entries = fSys.listStatus(getTestRootPath(fSys, "test"));
  Assert.assertEquals(1, entries.length);
  Assert.assertEquals(getTestRootPath(fSys, "test/hadoop"), entries[0].getPath());
  entries = fSys.listStatus(getTestRootPath(fSys, "test/hadoop"));
  Assert.assertEquals(3, entries.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop/a"), entries));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop/b"), entries));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop/c"), entries));
  entries = fSys.listStatus(getTestRootPath(fSys, "test/hadoop/a"));
  Assert.assertEquals(0, entries.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Glob "test/hadoop/*" with the trivial filter returns all three distinct children. */
@Test
public void testGlobStatusFilterWithSomePathMatchesAndTrivialFilter() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AXX)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  final FileStatus[] matches =
      fSys.globStatus(getTestRootPath(fSys, "test/hadoop/*"), DEFAULT_FILTER);
  Assert.assertEquals(3, matches.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AAA), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), matches));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Glob "test/hadoop/ax?" matches exactly the two ax* directories. */
@Test
public void testGlobStatusWithMultipleMatchesOfSingleChar() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AAA2)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  final FileStatus[] matches = fSys.globStatus(getTestRootPath(fSys, "test/hadoop/ax?"));
  Assert.assertEquals(2, matches.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), matches));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/** mkdirs creates a directory (idempotently) and its ancestors, never files. */
@Test
public void testMkdirs() throws Exception {
  final Path testDir = getTestRootPath(fSys, "test/hadoop");
  Assert.assertFalse(exists(fSys, testDir));
  Assert.assertFalse(isFile(fSys, testDir));
  // mkdirs is idempotent: the same invariants hold after a second call.
  for (int attempt = 0; attempt < 2; attempt++) {
    fSys.mkdirs(testDir);
    Assert.assertTrue(exists(fSys, testDir));
    Assert.assertFalse(isFile(fSys, testDir));
  }
  // Parent and grandparent were created as directories too.
  Path ancestor = testDir.getParent();
  Assert.assertTrue(exists(fSys, ancestor));
  Assert.assertFalse(isFile(fSys, ancestor));
  ancestor = ancestor.getParent();
  Assert.assertTrue(exists(fSys, ancestor));
  Assert.assertFalse(isFile(fSys, ancestor));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGlobStatusWithMultipleWildCardMatches() throws Exception {
Path[] testDirs={getTestRootPath(fSys,TEST_DIR_AAA),getTestRootPath(fSys,TEST_DIR_AXA),getTestRootPath(fSys,TEST_DIR_AXX),getTestRootPath(fSys,TEST_DIR_AAA2)};
if (exists(fSys,testDirs[0]) == false) {
for ( Path path : testDirs) {
fSys.mkdirs(path);
}
}
FileStatus[] paths=fSys.globStatus(getTestRootPath(fSys,"test/hadoop*/*"));
Assert.assertEquals(4,paths.length);
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,TEST_DIR_AAA),paths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,TEST_DIR_AXA),paths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,TEST_DIR_AXX),paths));
Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys,TEST_DIR_AAA2),paths));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** listStatus of "test/hadoop" with the X filter keeps only the two ax* directories. */
@Test
public void testListStatusFilterWithSomeMatches() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AAA2)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  final FileStatus[] matches =
      fSys.listStatus(getTestRootPath(fSys, "test/hadoop"), TEST_X_FILTER);
  Assert.assertEquals(2, matches.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), matches));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/** Recursive mkdir succeeds when the parent already exists. */
@Test
public void testMkdirRecursiveWithExistingDir() throws IOException {
  final Path dir = getTestRootPath(fc, "aDir");
  fc.mkdir(dir, FileContext.DEFAULT_PERM, true);
  Assert.assertTrue(isDir(fc, dir));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/** Recursive mkdir creates missing ancestors on the way to the target. */
@Test
public void testMkdirRecursiveWithNonExistingDir() throws IOException {
  final Path dir = getTestRootPath(fc, "NonExistant2/aDir");
  fc.mkdir(dir, FileContext.DEFAULT_PERM, true);
  Assert.assertTrue(isDir(fc, dir));
}
APIUtilityVerifier BooleanVerifier
/** Creating a file directly under an existing directory yields a regular file. */
@Test
public void testCreateRecursiveWithExistingDir() throws IOException {
  final Path file = getTestRootPath(fc, "foo");
  createFile(fc, file);
  Assert.assertTrue(isFile(fc, file));
}
APIUtilityVerifier BooleanVerifier
/** Creating a file under a missing directory succeeds, creating the parent. */
@Test
public void testCreateRecursiveWithNonExistingDir() throws IOException {
  final Path file = getTestRootPath(fc, "NonExisting/foo");
  createFile(fc, file);
  Assert.assertTrue(isFile(fc, file));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/** Non-recursive mkdir succeeds when the parent already exists. */
@Test
public void testMkdirNonRecursiveWithExistingDir() throws IOException {
  final Path dir = getTestRootPath(fc, "aDir");
  fc.mkdir(dir, FileContext.DEFAULT_PERM, false);
  Assert.assertTrue(isDir(fc, dir));
}
APIUtilityVerifier BooleanVerifier
/** Creating a file whose parent exists yields a regular file. */
@Test
public void testCreateNonRecursiveWithExistingDir() throws IOException {
  final Path file = getTestRootPath(fc, "foo");
  createFile(fc, file);
  Assert.assertTrue(isFile(fc, file));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** FileContext glob "test/hadoop*" matches the two directory roots, not their children. */
@Test
public void testGlobStatusSomeMatchesInDirectories() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AAA2)};
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  final FileStatus[] matches = fc.util().globStatus(getTestRootPath(fc, "test/hadoop*"));
  Assert.assertEquals(2, matches.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop"), matches));
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop2"), matches));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/** FileContext mkdir creates a directory (idempotently) and its ancestors, never files. */
@Test
public void testMkdirs() throws Exception {
  final Path testDir = getTestRootPath(fc, "test/hadoop");
  Assert.assertFalse(exists(fc, testDir));
  Assert.assertFalse(isFile(fc, testDir));
  // mkdir is idempotent: the same invariants hold after a second call.
  for (int attempt = 0; attempt < 2; attempt++) {
    fc.mkdir(testDir, FsPermission.getDefault(), true);
    Assert.assertTrue(exists(fc, testDir));
    Assert.assertFalse(isFile(fc, testDir));
  }
  // Parent and grandparent were created as directories too.
  Path ancestor = testDir.getParent();
  Assert.assertTrue(exists(fc, ancestor));
  Assert.assertFalse(isFile(fc, ancestor));
  ancestor = ancestor.getParent();
  Assert.assertTrue(exists(fc, ancestor));
  Assert.assertFalse(isFile(fc, ancestor));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** FileContext glob "test/hadoop/?" matches no single-character names. */
@Test
public void testGlobStatusFilterWithEmptyPathResults() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AXX)};
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  final FileStatus[] matches =
      fc.util().globStatus(getTestRootPath(fc, "test/hadoop/?"), DEFAULT_FILTER);
  Assert.assertEquals(0, matches.length);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** FileContext listStatus of "test" with the X filter yields nothing. */
@Test
public void testListStatusFilterWithNoMatches() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fc, TEST_DIR_AAA2),
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX)};
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  final FileStatus[] matches =
      fc.util().listStatus(getTestRootPath(fc, "test"), TEST_X_FILTER);
  Assert.assertEquals(0, matches.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** FileContext glob "test/hadoop/*" with the X filter keeps only the two ax* directories. */
@Test
public void testGlobStatusFilterWithMultiplePathMatchesAndNonTrivialFilter() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AXX)};
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  final FileStatus[] matches =
      fc.util().globStatus(getTestRootPath(fc, "test/hadoop/*"), TEST_X_FILTER);
  Assert.assertEquals(2, matches.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), matches));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), matches));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** open() with an explicit buffer size reads back exactly the bytes written. */
@Test
public void testOpen2() throws IOException {
  final Path root = getTestRootPath(fc, "test");
  final Path file = new Path(root, "zoo");
  createFile(file);
  final long fileLength = fc.getFileStatus(file).getLen();
  final FSDataInputStream in = fc.open(file, 2048);
  try {
    final byte[] contents = new byte[(int) fileLength];
    in.readFully(contents);
    assertArrayEquals(data, contents);
  } finally {
    in.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGlobStatusWithMultipleWildCardMatches() throws Exception {
Path[] testDirs={getTestRootPath(fc,TEST_DIR_AAA),getTestRootPath(fc,TEST_DIR_AXA),getTestRootPath(fc,TEST_DIR_AXX),getTestRootPath(fc,TEST_DIR_AAA2)};
if (exists(fc,testDirs[0]) == false) {
for ( Path path : testDirs) {
fc.mkdir(path,FsPermission.getDefault(),true);
}
}
FileStatus[] paths=fc.util().globStatus(getTestRootPath(fc,"test/hadoop*/*"));
Assert.assertEquals(4,paths.length);
Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AAA),paths));
Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AXA),paths));
Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AXX),paths));
Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AAA2),paths));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * deleteOnExit on a path that does not exist must not register anything.
 * Fix: replaced the assertTrue(!x) anti-idiom with Assert.assertFalse for a
 * clearer intent and a better failure message.
 */
@Test
public void testDeleteOnExitUnexisting() throws IOException {
  final Path rootPath = getTestRootPath(fc, "test");
  final Path path = new Path(rootPath, "zoo");
  boolean registered = fc.deleteOnExit(path);
  Assert.assertFalse(registered);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** FileContext glob "test/hadoop/a??" with the X filter keeps exactly the two ax* dirs. */
@Test
public void testGlobStatusFilterWithMultiplePathWildcardsAndNonTrivialFilter() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AXX)};
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  final FileStatus[] matches =
      fc.util().globStatus(getTestRootPath(fc, "test/hadoop/a??"), TEST_X_FILTER);
  Assert.assertEquals(2, matches.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), matches));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), matches));
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * On file systems without symlink support: createSymlink and getLinkTarget must
 * throw IOException, and getFileLinkStatus must fall back to getFileStatus.
 * The whole body is a no-op when the file system does support symlinks.
 */
@Test public void testUnsupportedSymlink() throws IOException {
Path file=getTestRootPath(fc,"file");
Path link=getTestRootPath(fc,"linkToFile");
if (!fc.getDefaultFileSystem().supportsSymlinks()) {
try {
fc.createSymlink(file,link,false);
Assert.fail("Created a symlink on a file system that " + "does not support symlinks.");
}
catch ( IOException e) {
// expected: symlink creation must be rejected
}
// The target file must exist before probing getLinkTarget below.
createFile(file);
try {
fc.getLinkTarget(file);
Assert.fail("Got a link target on a file system that " + "does not support symlinks.");
}
catch ( IOException e) {
// expected: a regular file has no link target here
}
// Without symlink support, link status degenerates to plain file status.
Assert.assertEquals(fc.getFileStatus(file),fc.getFileLinkStatus(file));
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/** FileContext: non-recursive delete of a non-empty dir fails; recursive delete removes all. */
@Test
public void testDeleteRecursively() throws IOException {
  final Path dir = getTestRootPath(fc, "test/hadoop");
  final Path file = getTestRootPath(fc, "test/hadoop/file");
  final Path subDir = getTestRootPath(fc, "test/hadoop/subdir");
  createFile(file);
  fc.mkdir(subDir, FsPermission.getDefault(), true);
  Assert.assertTrue("File exists", exists(fc, file));
  Assert.assertTrue("Dir exists", exists(fc, dir));
  Assert.assertTrue("Subdir exists", exists(fc, subDir));
  try {
    fc.delete(dir, false);
    Assert.fail("Should throw IOException.");
  } catch (IOException expected) {
    // non-recursive delete of a non-empty directory must fail
  }
  Assert.assertTrue("File still exists", exists(fc, file));
  Assert.assertTrue("Dir still exists", exists(fc, dir));
  Assert.assertTrue("Subdir still exists", exists(fc, subDir));
  Assert.assertTrue("Deleted", fc.delete(dir, true));
  Assert.assertFalse("File doesn't exist", exists(fc, file));
  Assert.assertFalse("Dir doesn't exist", exists(fc, dir));
  Assert.assertFalse("Subdir doesn't exist", exists(fc, subDir));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A FileContext constructed from the default AbstractFileSystem must
 * resolve paths to the same location as the original context.
 */
@Test public void testGetFileContext1() throws IOException {
  final AbstractFileSystem defaultAfs = fc.getDefaultFileSystem();
  final FileContext derivedContext = FileContext.getFileContext(defaultAfs);
  final Path zooPath = new Path(getTestRootPath(fc, "test"), "zoo");
  // Create an empty file through the derived context.
  derivedContext
      .create(zooPath, EnumSet.of(CREATE), Options.CreateOpts.createParent())
      .close();
  // Resolution must not alter the path component.
  final Path resolved = derivedContext.resolvePath(zooPath);
  assertEquals(resolved.toUri().getPath(), zooPath.toUri().getPath());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Globbing test/hadoop/* with the trivial (accept-all) filter must return
 * exactly the three distinct test directories.
 */
@Test public void testGlobStatusFilterWithSomePathMatchesAndTrivialFilter() throws Exception {
// FIX: the original array listed TEST_DIR_AXX twice; the duplicate only
// caused a redundant mkdir of an existing directory and has been removed.
Path[] testDirs={getTestRootPath(fc,TEST_DIR_AAA),getTestRootPath(fc,TEST_DIR_AXA),getTestRootPath(fc,TEST_DIR_AXX)};
if (exists(fc,testDirs[0]) == false) {
for ( Path path : testDirs) {
fc.mkdir(path,FsPermission.getDefault(),true);
}
}
FileStatus[] filteredPaths=fc.util().globStatus(getTestRootPath(fc,"test/hadoop/*"),DEFAULT_FILTER);
Assert.assertEquals(3,filteredPaths.length);
Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AAA),filteredPaths));
Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AXA),filteredPaths));
Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AXX),filteredPaths));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creating a file whose parent directory does not yet exist must succeed
 * and implicitly create the parent.
 */
@Test public void testWriteInNonExistentDirectory() throws IOException {
  final Path filePath = getTestRootPath(fc, "test/hadoop/file");
  final Path parentDir = filePath.getParent();
  Assert.assertFalse("Parent doesn't exist", exists(fc, parentDir));
  createFile(filePath);
  // The file, its full content, and the implicit parent must all exist.
  Assert.assertTrue("Exists", exists(fc, filePath));
  Assert.assertEquals("Length", data.length, fc.getFileStatus(filePath).getLen());
  Assert.assertTrue("Parent exists", exists(fc, parentDir));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Glob behavior for nonexistent inputs: a literal (non-wildcard) path with
 * no match yields null, while wildcard patterns with no matches yield an
 * empty array.
 */
@Test public void testGlobStatusNonExistentFile() throws Exception {
FileStatus[] paths=fc.util().globStatus(getTestRootPath(fc,"test/hadoopfsdf"));
// Literal path that does not exist: globStatus returns null.
Assert.assertNull(paths);
paths=fc.util().globStatus(getTestRootPath(fc,"test/hadoopfsdf/?"));
// Wildcard pattern with no matches: empty array, not null.
Assert.assertEquals(0,paths.length);
paths=fc.util().globStatus(getTestRootPath(fc,"test/hadoopfsdf/xyz*/?"));
Assert.assertEquals(0,paths.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Globbing test/hadoop/a?? with the trivial (accept-all) filter must return
 * exactly the three distinct test directories.
 */
@Test public void testGlobStatusFilterWithMultipleWildCardMatchesAndTrivialFilter() throws Exception {
// FIX: the original array listed TEST_DIR_AXX twice; the duplicate only
// caused a redundant mkdir of an existing directory and has been removed.
Path[] testDirs={getTestRootPath(fc,TEST_DIR_AAA),getTestRootPath(fc,TEST_DIR_AXA),getTestRootPath(fc,TEST_DIR_AXX)};
if (exists(fc,testDirs[0]) == false) {
for ( Path path : testDirs) {
fc.mkdir(path,FsPermission.getDefault(),true);
}
}
FileStatus[] filteredPaths=fc.util().globStatus(getTestRootPath(fc,"test/hadoop/a??"),DEFAULT_FILTER);
Assert.assertEquals(3,filteredPaths.length);
Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AAA),filteredPaths));
Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AXA),filteredPaths));
Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AXX),filteredPaths));
}
APIUtilityVerifier BooleanVerifier
/**
 * delete() on a path that was never created must report false rather than
 * throwing.
 */
@Test public void testDeleteNonExistentFile() throws IOException {
  final Path missing = getTestRootPath(fc, "test/hadoop/file");
  Assert.assertFalse("Doesn't exist", exists(fc, missing));
  Assert.assertFalse("No deletion", fc.delete(missing, true));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Writes a file with checksum verification enabled, then reads it back and
 * verifies the content is intact.
 */
@Test public void testSetVerifyChecksum() throws IOException {
final Path rootPath=getTestRootPath(fc,"test");
final Path path=new Path(rootPath,"zoo");
FSDataOutputStream out=fc.create(path,EnumSet.of(CREATE),Options.CreateOpts.createParent());
try {
fc.setVerifyChecksum(true,path);
out.write(data,0,data.length);
}
finally {
out.close();
}
FileStatus fileStatus=fc.getFileStatus(path);
final long len=fileStatus.getLen();
assertEquals(data.length,len);
byte[] bb=new byte[(int)len];
FSDataInputStream fsdis=fc.open(path);
try {
// FIX: a single read() call may legally return fewer bytes than
// requested, which left the tail of bb zeroed and made the array
// comparison flaky; loop until the buffer is completely filled.
int off=0;
while (off < bb.length) {
int nread=fsdis.read(bb,off,bb.length - off);
assertTrue("Premature EOF while reading " + path,nread >= 0);
off+=nread;
}
}
finally {
fsdis.close();
}
assertArrayEquals(data,bb);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A single-character glob combined with a non-trivial filter must match
 * nothing, since every test directory name has three characters.
 */
@Test public void testGlobStatusFilterWithNoMatchingPathsAndNonTrivialFilter() throws Exception {
// FIX: the original array listed TEST_DIR_AXX twice; the duplicate only
// caused a redundant mkdir of an existing directory and has been removed.
Path[] testDirs={getTestRootPath(fc,TEST_DIR_AAA),getTestRootPath(fc,TEST_DIR_AXA),getTestRootPath(fc,TEST_DIR_AXX)};
if (exists(fc,testDirs[0]) == false) {
for ( Path path : testDirs) {
fc.mkdir(path,FsPermission.getDefault(),true);
}
}
FileStatus[] filteredPaths=fc.util().globStatus(getTestRootPath(fc,"test/hadoop/?"),TEST_X_FILTER);
Assert.assertEquals(0,filteredPaths.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * An empty directory must be deletable with a non-recursive delete.
 */
@Test public void testDeleteEmptyDirectory() throws IOException {
  final Path emptyDir = getTestRootPath(fc, "test/hadoop");
  fc.mkdir(emptyDir, FsPermission.getDefault(), true);
  Assert.assertTrue("Dir exists", exists(fc, emptyDir));
  // recursive=false is sufficient because the directory is empty.
  Assert.assertTrue("Deleted", fc.delete(emptyDir, false));
  Assert.assertFalse("Dir doesn't exist", exists(fc, emptyDir));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * The pattern ax? must match exactly the two directories whose names start
 * with "ax" (TEST_DIR_AXA and TEST_DIR_AXX).
 */
@Test public void testGlobStatusWithMultipleMatchesOfSingleChar() throws Exception {
Path[] testDirs={getTestRootPath(fc,TEST_DIR_AAA),getTestRootPath(fc,TEST_DIR_AXA),getTestRootPath(fc,TEST_DIR_AXX),getTestRootPath(fc,TEST_DIR_AAA2)};
// Create the fixture directories only once per test root.
if (exists(fc,testDirs[0]) == false) {
for ( Path path : testDirs) {
fc.mkdir(path,FsPermission.getDefault(),true);
}
}
FileStatus[] paths=fc.util().globStatus(getTestRootPath(fc,"test/hadoop/ax?"));
Assert.assertEquals(2,paths.length);
Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AXA),paths));
Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AXX),paths));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises setWorkingDirectory/getWorkingDirectory: ".", "..", relative
 * and absolute paths, a nonexistent directory, a directory on the local
 * file system, and a regular file (which must be rejected).
 * NOTE: the assertions depend on the exact order of the preceding
 * setWorkingDirectory calls; do not reorder.
 */
@Test public void testWorkingDirectory() throws Exception {
Path workDir=new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc),new Path("test"));
fc.setWorkingDirectory(workDir);
Assert.assertEquals(workDir,fc.getWorkingDirectory());
// "." must leave the working directory unchanged.
fc.setWorkingDirectory(new Path("."));
Assert.assertEquals(workDir,fc.getWorkingDirectory());
// ".." must move the working directory to the parent.
fc.setWorkingDirectory(new Path(".."));
Assert.assertEquals(workDir.getParent(),fc.getWorkingDirectory());
workDir=new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc),new Path("test"));
fc.setWorkingDirectory(workDir);
Assert.assertEquals(workDir,fc.getWorkingDirectory());
// A relative path is resolved against the current working directory.
Path relativeDir=new Path("existingDir1");
Path absoluteDir=new Path(workDir,"existingDir1");
fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
fc.setWorkingDirectory(relativeDir);
Assert.assertEquals(absoluteDir,fc.getWorkingDirectory());
absoluteDir=getTestRootPath(fc,"test/existingDir2");
fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
fc.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir,fc.getWorkingDirectory());
// Relative open/mkdir must resolve against the new working directory.
Path absolutePath=new Path(absoluteDir,"foo");
fc.create(absolutePath,EnumSet.of(CREATE)).close();
fc.open(new Path("foo")).close();
fc.mkdir(new Path("newDir"),FileContext.DEFAULT_PERM,true);
Assert.assertTrue(isDir(fc,new Path(absoluteDir,"newDir")));
// cd into a nonexistent directory must fail.
absoluteDir=getTestRootPath(fc,"nonexistingPath");
try {
fc.setWorkingDirectory(absoluteDir);
Assert.fail("cd to non existing dir should have failed");
}
catch ( Exception e) {
// expected
}
// cd into a directory on the local file system must work too.
absoluteDir=new Path(localFsRootPath,"existingDir");
fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
fc.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir,fc.getWorkingDirectory());
// cd onto a regular file must be rejected with IOException.
Path aRegularFile=new Path("aRegularFile");
createFile(aRegularFile);
try {
fc.setWorkingDirectory(aRegularFile);
fail("An IOException expected.");
}
catch ( IOException ioe) {
// expected
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A single-character glob must match nothing, since every test directory
 * name has three or more characters.
 */
@Test public void testGlobStatusWithNoMatchesInPath() throws Exception {
  final Path[] fixtureDirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AAA2)};
  // Create the fixture directories only once per test root.
  if (!exists(fc, fixtureDirs[0])) {
    for (Path dir : fixtureDirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] matches = fc.util().globStatus(getTestRootPath(fc, "test/hadoop/?"));
  Assert.assertEquals(0, matches.length);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * mkdir must fail with IOException when any path component is an existing
 * regular file, whether the file is the immediate parent or an ancestor.
 */
@Test public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
Path testDir=getTestRootPath(fc,"test/hadoop");
Assert.assertFalse(exists(fc,testDir));
fc.mkdir(testDir,FsPermission.getDefault(),true);
Assert.assertTrue(exists(fc,testDir));
// Place a regular file in the path that the mkdirs below must traverse.
createFile(getTestRootPath(fc,"test/hadoop/file"));
Path testSubDir=getTestRootPath(fc,"test/hadoop/file/subdir");
try {
fc.mkdir(testSubDir,FsPermission.getDefault(),true);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: immediate parent is a file
}
Assert.assertFalse(exists(fc,testSubDir));
Path testDeepSubDir=getTestRootPath(fc,"test/hadoop/file/deep/sub/dir");
try {
fc.mkdir(testDeepSubDir,FsPermission.getDefault(),true);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: an ancestor is a file
}
Assert.assertFalse(exists(fc,testDeepSubDir));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * listStatus via both the array API and the RemoteIterator API must agree
 * on the contents of each directory.
 */
@Test public void testListStatus() throws Exception {
Path[] testDirs={getTestRootPath(fc,"test/hadoop/a"),getTestRootPath(fc,"test/hadoop/b"),getTestRootPath(fc,"test/hadoop/c/1")};
Assert.assertFalse(exists(fc,testDirs[0]));
for ( Path path : testDirs) {
fc.mkdir(path,FsPermission.getDefault(),true);
}
// Array-based listing.
FileStatus[] paths=fc.util().listStatus(getTestRootPath(fc,"test"));
Assert.assertEquals(1,paths.length);
Assert.assertEquals(getTestRootPath(fc,"test/hadoop"),paths[0].getPath());
paths=fc.util().listStatus(getTestRootPath(fc,"test/hadoop"));
Assert.assertEquals(3,paths.length);
Assert.assertTrue(containsPath(getTestRootPath(fc,"test/hadoop/a"),paths));
Assert.assertTrue(containsPath(getTestRootPath(fc,"test/hadoop/b"),paths));
Assert.assertTrue(containsPath(getTestRootPath(fc,"test/hadoop/c"),paths));
paths=fc.util().listStatus(getTestRootPath(fc,"test/hadoop/a"));
Assert.assertEquals(0,paths.length);
// Iterator-based listing.
// FIX: the iterator was declared raw, so next().getPath() and the
// FileStatus assignment below did not compile; parameterize it.
RemoteIterator<FileStatus> pathsIterator=fc.listStatus(getTestRootPath(fc,"test"));
Assert.assertEquals(getTestRootPath(fc,"test/hadoop"),pathsIterator.next().getPath());
Assert.assertFalse(pathsIterator.hasNext());
pathsIterator=fc.listStatus(getTestRootPath(fc,"test/hadoop"));
FileStatus[] subdirs=new FileStatus[3];
int i=0;
while (i < 3 && pathsIterator.hasNext()) {
subdirs[i++]=pathsIterator.next();
}
// Exactly three entries and no more.
Assert.assertFalse(pathsIterator.hasNext());
Assert.assertTrue(i == 3);
Assert.assertTrue(containsPath(getTestRootPath(fc,"test/hadoop/a"),subdirs));
Assert.assertTrue(containsPath(getTestRootPath(fc,"test/hadoop/b"),subdirs));
Assert.assertTrue(containsPath(getTestRootPath(fc,"test/hadoop/c"),subdirs));
pathsIterator=fc.listStatus(getTestRootPath(fc,"test/hadoop/a"));
Assert.assertFalse(pathsIterator.hasNext());
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * listCorruptFileBlocks must either yield a (possibly empty) iterator of
 * corrupted-block paths, or throw UnsupportedOperationException when the
 * underlying file system does not support the call.
 */
@Test public void testListCorruptFileBlocks() throws IOException {
final Path rootPath=getTestRootPath(fc,"test");
final Path path=new Path(rootPath,"zoo");
createFile(path);
try {
// FIX: the iterator was declared raw, so the Path assignment in the
// loop below did not compile; parameterize it.
final RemoteIterator<Path> remoteIterator=fc.listCorruptFileBlocks(path);
if (listCorruptedBlocksSupported()) {
assertTrue(remoteIterator != null);
Path p;
while (remoteIterator.hasNext()) {
p=remoteIterator.next();
System.out.println("corrupted block: " + p);
}
// Once exhausted, next() must throw NoSuchElementException.
try {
remoteIterator.next();
fail();
}
catch ( NoSuchElementException nsee) {
// expected
}
}
else {
// The call returned normally although the FS reports no support.
fail();
}
}
catch ( UnsupportedOperationException uoe) {
if (listCorruptedBlocksSupported()) {
fail(uoe.toString());
}
// else: expected for file systems without corrupt-block listing
}
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * setOwner with a null owner must change only the group; a call with both
 * owner and group null must be rejected with IllegalArgumentException.
 * Skipped on Windows and when the current user's groups are unavailable.
 */
@Test public void testSetOwner() throws IOException {
if (Path.WINDOWS) {
System.out.println("Cannot run test for Windows");
return;
}
String filename="bar";
Path f=fileContextTestHelper.getTestRootPath(fc,filename);
createFile(fc,f);
// FIX: the raw List made "String g0=groups.get(0)" a compile error
// (get() returned Object); parameterize with the element type.
List<String> groups=null;
try {
groups=getGroups();
System.out.println(filename + ": " + fc.getFileStatus(f).getPermission());
}
catch ( IOException e) {
System.out.println(StringUtils.stringifyException(e));
System.out.println("Cannot run test");
return;
}
if (groups == null || groups.size() < 1) {
System.out.println("Cannot run test: need at least one group. groups=" + groups);
return;
}
try {
// Changing the group (owner left null) must take effect.
String g0=groups.get(0);
fc.setOwner(f,null,g0);
Assert.assertEquals(g0,fc.getFileStatus(f).getGroup());
if (groups.size() > 1) {
String g1=groups.get(1);
fc.setOwner(f,null,g1);
Assert.assertEquals(g1,fc.getFileStatus(f).getGroup());
}
else {
System.out.println("Not testing changing the group since user " + "belongs to only one group.");
}
// Both arguments null is invalid.
try {
fc.setOwner(f,null,null);
fail("Exception expected.");
}
catch ( IllegalArgumentException iae) {
// expected
}
}
finally {
cleanupFile(fc,f);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Directories created through fc1 must be visible to and deletable through
 * fc2, including names containing spaces and special characters.
 */
@Test public void testDeleteDirectory() throws IOException {
String dirName="dirTest";
Path testDirPath=qualifiedPath(dirName,fc2);
Assert.assertFalse(exists(fc2,testDirPath));
// Create through one context, observe/delete through the other.
fc1.mkdir(testDirPath,FsPermission.getDefault(),true);
Assert.assertTrue(exists(fc2,testDirPath));
Assert.assertTrue(isDir(fc2,testDirPath));
fc2.delete(testDirPath,true);
Assert.assertFalse(isDir(fc2,testDirPath));
// Repeat for a variety of awkward directory names.
String dirNames[]={"deleteTest/testDir","deleteTest/test Dir","deleteTest/test*Dir","deleteTest/test#Dir","deleteTest/test1234","deleteTest/1234Test","deleteTest/test)Dir","deleteTest/test_DIr","deleteTest/()&^%$#@!~_+}{>"," ","^ "};
for ( String f : dirNames) {
// Skip names the current platform cannot represent.
if (!isTestableFileNameOnPlatform(f)) {
continue;
}
Path testPath=qualifiedPath(f,fc2);
Assert.assertFalse(exists(fc2,testPath));
fc1.mkdir(testPath,FsPermission.getDefault(),true);
Assert.assertTrue(exists(fc2,testPath));
Assert.assertTrue(isDir(fc2,testPath));
Assert.assertTrue(fc2.delete(testPath,true));
Assert.assertFalse(exists(fc2,testPath));
Assert.assertFalse(isDir(fc2,testPath));
}
}
APIUtilityVerifier BooleanVerifier
/**
 * isDir must be true for a created directory and for the root, and false
 * for a path that does not exist.
 */
@Test public void testIsDirectory() throws IOException {
  final Path existingDir = qualifiedPath("dirTest", fc2);
  final Path missingDir = qualifiedPath("nonExistantDir", fc2);
  final Path rootDir = qualifiedPath("/", fc2);
  // Create through fc1, then observe through fc2.
  fc1.mkdir(existingDir, FsPermission.getDefault(), true);
  Assert.assertTrue(isDir(fc2, existingDir));
  Assert.assertTrue(isDir(fc2, rootDir));
  Assert.assertFalse(isDir(fc2, missingDir));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A file created through one context must be visible to and deletable
 * through the other.
 */
@Test public void testDeleteFile() throws IOException {
  final Path target = qualifiedPath("testFile", fc2);
  Assert.assertFalse(exists(fc2, target));
  createFile(fc1, target);
  Assert.assertTrue(exists(fc2, target));
  // Non-recursive delete suffices for a regular file.
  fc2.delete(target, false);
  Assert.assertFalse(exists(fc2, target));
}
APIUtilityVerifier BooleanVerifier
/**
 * Files with awkward names (spaces, wildcards, punctuation) created via
 * fc1 must be visible via fc2.
 */
@Test public void testCreateFile() throws IOException {
String fileNames[]={"testFile","test File","test*File","test#File","test1234","1234Test","test)File","test_File","()&^%$#@!~_+}{>"," ","^ "};
  for (String candidate : fileNames) {
    // Skip names the current platform cannot represent.
    if (!isTestableFileNameOnPlatform(candidate)) {
      continue;
    }
    final Path candidatePath = qualifiedPath(candidate, fc2);
    Assert.assertFalse(exists(fc2, candidatePath));
    createFile(fc1, candidatePath);
    Assert.assertTrue(exists(fc2, candidatePath));
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * delete() on a missing directory returns false; once the directory is
 * created it can be deleted exactly once.
 */
@Test public void testDeleteNonExistingDirectory() throws IOException {
  final Path dirPath = qualifiedPath("testFile", fc2);
  // Nothing to delete yet.
  Assert.assertFalse(exists(fc2, dirPath));
  Assert.assertFalse(fc2.delete(dirPath, false));
  // Create via fc1, then delete via fc2.
  fc1.mkdir(dirPath, FsPermission.getDefault(), true);
  Assert.assertTrue(exists(fc2, dirPath));
  Assert.assertTrue(fc2.delete(dirPath, false));
  Assert.assertFalse(exists(fc2, dirPath));
  // A second delete of the same path reports false again.
  Assert.assertFalse(fc2.delete(dirPath, false));
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Creating a file from a null name must fail with
 * IllegalArgumentException (thrown while building the qualified path).
 */
@Test public void testCreateFileWithNullName() throws IOException {
String fileName=null;
try {
Path testPath=qualifiedPath(fileName,fc2);
Assert.assertFalse(exists(fc2,testPath));
createFile(fc1,testPath);
Assert.fail("Create file with null name should throw IllegalArgumentException.");
}
catch ( IllegalArgumentException e) {
// expected
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * delete() on a missing file returns false; once the file is created it
 * can be deleted exactly once.
 */
@Test public void testDeleteNonExistingFile() throws IOException {
  final Path filePath = qualifiedPath("testFile", fc2);
  // Nothing to delete yet.
  Assert.assertFalse(exists(fc2, filePath));
  Assert.assertFalse(fc2.delete(filePath, false));
  // Create via fc1, then delete via fc2.
  createFile(fc1, filePath);
  Assert.assertTrue(exists(fc2, filePath));
  Assert.assertTrue(fc2.delete(filePath, false));
  Assert.assertFalse(exists(fc2, filePath));
  // A second delete of the same path reports false again.
  Assert.assertFalse(fc2.delete(filePath, false));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Both contexts view the same underlying file, so the modification times
 * they report must agree.
 */
@Test public void testModificationTime() throws IOException {
  final Path filePath = qualifiedPath("file1", fc2);
  createFile(fc1, filePath);
  final long timeSeenByFc1 = fc1.getFileStatus(filePath).getModificationTime();
  final long timeSeenByFc2 = fc2.getFileStatus(filePath).getModificationTime();
  Assert.assertEquals(timeSeenByFc1, timeSeenByFc2);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Same contract as testDeleteNonExistingFile, but for a file nested two
 * directories deep (parents created implicitly by createFile).
 */
@Test public void testDeleteNonExistingFileInDir() throws IOException {
String testFileInDir="testDir/testDir/TestFile";
Path testPath=qualifiedPath(testFileInDir,fc2);
// Nothing to delete yet.
Assert.assertFalse(exists(fc2,testPath));
Assert.assertFalse(fc2.delete(testPath,false));
createFile(fc1,testPath);
Assert.assertTrue(exists(fc2,testPath));
Assert.assertTrue(fc2.delete(testPath,false));
Assert.assertFalse(exists(fc2,testPath));
// A second delete of the same path reports false again.
Assert.assertFalse(fc2.delete(testPath,false));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creating a file in a directory that does not yet exist must implicitly
 * create that directory.
 */
@Test public void testCreateFileInNonExistingDirectory() throws IOException {
  final Path nestedFile = qualifiedPath("testDir/testFile", fc2);
  Assert.assertFalse(exists(fc2, nestedFile));
  createFile(fc1, nestedFile);
  // The missing parent must now exist as a directory.
  final Path parent = nestedFile.getParent();
  Assert.assertTrue(isDir(fc2, parent));
  Assert.assertEquals("testDir", parent.getName());
  Assert.assertTrue(exists(fc2, nestedFile));
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Creating a file that already exists (without overwrite) must fail with
 * IOException and leave the original file in place.
 */
@Test public void testCreateExistingFile() throws IOException {
String fileName="testFile";
Path testPath=qualifiedPath(fileName,fc2);
Assert.assertFalse(exists(fc2,testPath));
createFile(fc1,testPath);
try {
// Second create of the same path must be rejected.
createFile(fc2,testPath);
Assert.fail("Create existing file should throw an IOException.");
}
catch ( IOException e) {
// expected
}
// The original file must still exist.
Assert.assertTrue(exists(fc2,testPath));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Cross-context listStatus: directories created through fc1 must be listed
 * correctly by both the array API and the RemoteIterator API.
 */
@Test public void testListStatus() throws Exception {
final String hPrefix="test/hadoop";
final String[] dirs={hPrefix + "/a",hPrefix + "/b",hPrefix + "/c",hPrefix + "/1",hPrefix + "/#@#@",hPrefix + "/&*#$#$@234"};
// FIX: the raw ArrayList made the Path assignments and exists() calls
// below fail to compile (get()/iteration yielded Object); parameterize.
ArrayList<Path> testDirs=new ArrayList<Path>();
for ( String d : dirs) {
// Skip names the current platform cannot represent.
if (!isTestableFileNameOnPlatform(d)) {
continue;
}
testDirs.add(qualifiedPath(d,fc2));
}
Assert.assertFalse(exists(fc1,testDirs.get(0)));
for ( Path path : testDirs) {
fc1.mkdir(path,FsPermission.getDefault(),true);
}
// Array-based listing.
FileStatus[] paths=fc1.util().listStatus(qualifiedPath("test",fc1));
Assert.assertEquals(1,paths.length);
Assert.assertEquals(qualifiedPath(hPrefix,fc1),paths[0].getPath());
paths=fc1.util().listStatus(qualifiedPath(hPrefix,fc1));
Assert.assertEquals(testDirs.size(),paths.length);
for (int i=0; i < testDirs.size(); i++) {
boolean found=false;
for (int j=0; j < paths.length; j++) {
if (qualifiedPath(testDirs.get(i).toString(),fc1).equals(paths[j].getPath())) {
found=true;
}
}
Assert.assertTrue(testDirs.get(i) + " not found",found);
}
paths=fc1.util().listStatus(qualifiedPath(dirs[0],fc1));
Assert.assertEquals(0,paths.length);
// Iterator-based listing.
// FIX: the raw RemoteIterator made next().getPath() and the FileStatus
// assignment below fail to compile; parameterize it.
RemoteIterator<FileStatus> pathsItor=fc1.listStatus(qualifiedPath("test",fc1));
Assert.assertEquals(qualifiedPath(hPrefix,fc1),pathsItor.next().getPath());
Assert.assertFalse(pathsItor.hasNext());
pathsItor=fc1.listStatus(qualifiedPath(hPrefix,fc1));
int dirLen=0;
for (; pathsItor.hasNext(); dirLen++) {
boolean found=false;
FileStatus stat=pathsItor.next();
for (int j=0; j < dirs.length; j++) {
if (qualifiedPath(dirs[j],fc1).equals(stat.getPath())) {
found=true;
break;
}
}
Assert.assertTrue(stat.getPath() + " not found",found);
}
Assert.assertEquals(testDirs.size(),dirLen);
pathsItor=fc1.listStatus(qualifiedPath(dirs[0],fc1));
Assert.assertFalse(pathsItor.hasNext());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Cross-context variant: mkdir through either context must fail with
 * IOException when any path component is an existing regular file.
 */
@Test public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
Path testDir=qualifiedPath("test/hadoop",fc2);
Assert.assertFalse(exists(fc2,testDir));
fc2.mkdir(testDir,FsPermission.getDefault(),true);
Assert.assertTrue(exists(fc2,testDir));
// Place a regular file in the path that the mkdirs below must traverse.
createFile(fc1,qualifiedPath("test/hadoop/file",fc2));
Path testSubDir=qualifiedPath("test/hadoop/file/subdir",fc2);
try {
fc1.mkdir(testSubDir,FsPermission.getDefault(),true);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: immediate parent is a file
}
Assert.assertFalse(exists(fc1,testSubDir));
Path testDeepSubDir=qualifiedPath("test/hadoop/file/deep/sub/dir",fc1);
try {
fc2.mkdir(testDeepSubDir,FsPermission.getDefault(),true);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: an ancestor is a file
}
Assert.assertFalse(exists(fc1,testDeepSubDir));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Directory creation through fc1 must be observable through fc2, mkdir
 * must be idempotent, ancestors must exist implicitly, and awkward
 * directory names must be supported.
 */
@Test public void testCreateDirectory() throws IOException {
Path path=qualifiedPath("test/hadoop",fc2);
Path falsePath=qualifiedPath("path/doesnot.exist",fc2);
Path subDirPath=qualifiedPath("dir0",fc2);
// Precondition: nothing exists yet.
Assert.assertFalse(exists(fc1,path));
Assert.assertFalse(isFile(fc1,path));
Assert.assertFalse(isDir(fc1,path));
fc1.mkdir(path,FsPermission.getDefault(),true);
Assert.assertTrue(isDir(fc2,path));
Assert.assertTrue(exists(fc2,path));
Assert.assertFalse(isFile(fc2,path));
// Repeated mkdir of the same directory must be a no-op, not an error.
fc1.mkdir(subDirPath,FsPermission.getDefault(),true);
fc1.mkdir(subDirPath,FsPermission.getDefault(),true);
fc1.mkdir(subDirPath,FsPermission.getDefault(),true);
// Ancestors of the created path must exist as directories.
Path parentDir=path.getParent();
Assert.assertTrue(exists(fc2,parentDir));
Assert.assertFalse(isFile(fc2,parentDir));
Path grandparentDir=parentDir.getParent();
Assert.assertTrue(exists(fc2,grandparentDir));
Assert.assertFalse(isFile(fc2,grandparentDir));
// A path that was never created must not appear.
Assert.assertFalse(exists(fc2,falsePath));
Assert.assertFalse(isDir(fc2,falsePath));
// Repeat for a variety of awkward directory names.
String dirNames[]={"createTest/testDir","createTest/test Dir","deleteTest/test*Dir","deleteTest/test#Dir","deleteTest/test1234","deleteTest/test_DIr","deleteTest/1234Test","deleteTest/test)Dir","deleteTest/()&^%$#@!~_+}{>"," ","^ "};
for ( String f : dirNames) {
// Skip names the current platform cannot represent.
if (!isTestableFileNameOnPlatform(f)) {
continue;
}
Path testPath=qualifiedPath(f,fc2);
Assert.assertFalse(exists(fc2,testPath));
fc1.mkdir(testPath,FsPermission.getDefault(),true);
Assert.assertTrue(exists(fc2,testPath));
Assert.assertTrue(isDir(fc2,testPath));
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Rewriting a file through the raw file system leaves the old checksum
 * file stale: a verified read must then fail with ChecksumException, and a
 * non-verified read must return the new content.
 */
@Test public void testCorruptedChecksum() throws Exception {
Path testPath=new Path(TEST_ROOT_DIR,"testCorruptChecksum");
Path checksumPath=localFs.getChecksumFile(testPath);
// Write through the checksummed FS so a checksum file is produced.
FSDataOutputStream out=localFs.create(testPath,true);
out.write("testing 1 2 3".getBytes());
out.close();
assertTrue(localFs.exists(checksumPath));
FileStatus stat=localFs.getFileStatus(checksumPath);
// Rewrite the data through the raw FS, bypassing checksum generation,
// so the existing checksum file is now stale.
out=localFs.getRawFileSystem().create(testPath,true);
out.write("testing stale checksum".getBytes());
out.close();
assertTrue(localFs.exists(checksumPath));
assertEquals(stat,localFs.getFileStatus(checksumPath));
Exception e=null;
try {
localFs.setVerifyChecksum(true);
readFile(localFs,testPath,1024);
}
catch ( ChecksumException ce) {
e=ce;
}
// FIX: this assertion used to live in a finally block, where it would
// also fire if readFile threw some unrelated exception and thereby mask
// the real failure; check it after the try/catch instead.
assertNotNull("got checksum error",e);
// With verification off the (stale-checksummed) content is readable.
localFs.setVerifyChecksum(false);
String str=readFile(localFs,testPath,1024);
assertEquals("testing stale checksum",str);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
* Test to ensure that if the checksum file is truncated, a
* ChecksumException is thrown
*/
@Test public void testTruncatedChecksum() throws Exception {
Path testPath=new Path(TEST_ROOT_DIR,"testtruncatedcrc");
FSDataOutputStream fout=localFs.create(testPath);
fout.write("testing truncation".getBytes());
fout.close();
Path checksumFile=localFs.getChecksumFile(testPath);
FileSystem rawFs=localFs.getRawFileSystem();
FSDataInputStream checksumStream=rawFs.open(checksumFile);
byte buf[]=new byte[8192];
int read=checksumStream.read(buf,0,buf.length);
checksumStream.close();
FSDataOutputStream replaceStream=rawFs.create(checksumFile);
replaceStream.write(buf,0,read - 1);
replaceStream.close();
try {
readFile(localFs,testPath,1024);
fail("Did not throw a ChecksumException when reading truncated " + "crc file");
}
catch ( ChecksumException ie) {
}
localFs.setVerifyChecksum(false);
String str=readFile(localFs,testPath,1024).toString();
assertTrue("read","testing truncation".equals(str));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Reads a checksummed file at a range of buffer sizes, then swaps in the
 * checksum file of a different file and verifies that a verified read
 * fails while a non-verified read succeeds.
 */
@Test public void testVerifyChecksum() throws Exception {
Path testPath=new Path(TEST_ROOT_DIR,"testPath");
Path testPath11=new Path(TEST_ROOT_DIR,"testPath11");
FSDataOutputStream fout=localFs.create(testPath);
fout.write("testing".getBytes());
fout.close();
fout=localFs.create(testPath11);
fout.write("testing you".getBytes());
fout.close();
// Buffer sizes straddling the checksum chunk boundaries.
readFile(localFs,testPath,128);
readFile(localFs,testPath,511);
readFile(localFs,testPath,512);
readFile(localFs,testPath,513);
readFile(localFs,testPath,1023);
readFile(localFs,testPath,1024);
readFile(localFs,testPath,1025);
// Replace testPath's checksum file with testPath11's.
localFs.delete(localFs.getChecksumFile(testPath),true);
// FIX: assertTrue(msg, !cond) rewritten as the clearer assertFalse.
assertFalse("checksum deleted",localFs.exists(localFs.getChecksumFile(testPath)));
FileUtil.copy(localFs,localFs.getChecksumFile(testPath11),localFs,localFs.getChecksumFile(testPath),false,true,localFs.getConf());
assertTrue("checksum exists",localFs.exists(localFs.getChecksumFile(testPath)));
// A verified read against the wrong checksum file must fail.
boolean errorRead=false;
try {
readFile(localFs,testPath,1024);
}
catch ( ChecksumException ie) {
errorRead=true;
}
assertTrue("error reading",errorRead);
// With verification off, the original content is returned.
localFs.setVerifyChecksum(false);
// FIX: readFile already returns a String; the .toString() call was
// redundant and has been dropped.
String str=readFile(localFs,testPath,1024);
assertTrue("read","testing".equals(str));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * DF.getMount() for the current working directory must name an existing
 * directory that is a prefix of the working directory's canonical path.
 */
@Test(timeout=5000) public void testGetMountCurrentDirectory() throws Exception {
  final String workingDir = new File(".").getAbsoluteFile().getCanonicalPath();
  final DF df = new DF(new File(workingDir), 0L);
  final String mountPath = df.getMount();
  final File mountDir = new File(mountPath);
  assertTrue("Mount dir [" + mountDir.getAbsolutePath() + "] should exist.",
      mountDir.exists());
  assertTrue("Mount dir [" + mountDir.getAbsolutePath() + "] should be directory.",
      mountDir.isDirectory());
  assertTrue("Working dir [" + workingDir + "] should start with ["+ mountPath+ "].",
      workingDir.startsWith(mountPath));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Exercises the short-circuit mmap cache across repeated zero-copy reads:
 * verifies cache counters before/after reads, that the first block's
 * replica is mmapped and pinned while buffers are outstanding, and that
 * the mmap becomes evictable (and is eventually purged) after release.
 */
@Test public void testZeroCopyMmapCache() throws Exception {
HdfsConfiguration conf=initZeroCopyTest();
MiniDFSCluster cluster=null;
final Path TEST_PATH=new Path("/a");
final int TEST_FILE_LENGTH=16385;
final int RANDOM_SEED=23453;
final String CONTEXT="testZeroCopyMmapCacheContext";
FSDataInputStream fsIn=null;
ByteBuffer results[]={null,null,null,null};
DistributedFileSystem fs=null;
conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,CONTEXT);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,RANDOM_SEED);
try {
DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1);
}
catch ( InterruptedException e) {
Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e);
}
catch ( TimeoutException e) {
Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e);
}
fsIn=fs.open(TEST_PATH);
byte original[]=new byte[TEST_FILE_LENGTH];
IOUtils.readFully(fsIn,original,0,TEST_FILE_LENGTH);
fsIn.close();
fsIn=fs.open(TEST_PATH);
final ShortCircuitCache cache=ClientContext.get(CONTEXT,new DFSClient.Conf(conf)).getShortCircuitCache();
cache.accept(new CountingVisitor(0,5,5,0));
results[0]=fsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
fsIn.seek(0);
results[1]=fsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
final ExtendedBlock firstBlock=DFSTestUtil.getFirstBlock(fs,TEST_PATH);
cache.accept(new CacheVisitor(){
@Override public void visit( int numOutstandingMmaps, Map replicas, Map failedLoads, Map evictable, Map evictableMmapped){
// FIX: replicas is a raw Map, so get() returns Object; the original
// direct assignment to ShortCircuitReplica did not compile. Cast
// explicitly (the map is keyed by ExtendedBlockId per the lookup).
ShortCircuitReplica replica=(ShortCircuitReplica)replicas.get(new ExtendedBlockId(firstBlock.getBlockId(),firstBlock.getBlockPoolId()));
Assert.assertNotNull(replica);
Assert.assertTrue(replica.hasMmap());
// The replica must not be evictable while a buffer is outstanding.
Assert.assertNull(replica.getEvictableTimeNs());
}
}
);
results[2]=fsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
results[3]=fsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
cache.accept(new CountingVisitor(3,5,2,0));
// Release all zero-copy buffers so the mmap can be evicted.
for ( ByteBuffer buffer : results) {
if (buffer != null) {
fsIn.releaseBuffer(buffer);
}
}
fsIn.close();
// Wait until the cache has purged the now-evictable mmap.
GenericTestUtils.waitFor(new Supplier(){
public Boolean get(){
final MutableBoolean finished=new MutableBoolean(false);
cache.accept(new CacheVisitor(){
@Override public void visit( int numOutstandingMmaps, Map replicas, Map failedLoads, Map evictable, Map evictableMmapped){
finished.setValue(evictableMmapped.isEmpty());
}
}
);
return finished.booleanValue();
}
}
,10,60000);
cache.accept(new CountingVisitor(0,-1,-1,-1));
fs.close();
cluster.shutdown();
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
* Test that we can zero-copy read cached data even without disabling
* checksums.
*/
@Test(timeout=120000) public void testZeroCopyReadOfCachedData() throws Exception {
BlockReaderTestUtil.enableShortCircuitShmTracing();
BlockReaderTestUtil.enableBlockReaderFactoryTracing();
BlockReaderTestUtil.enableHdfsCachingTracing();
final int TEST_FILE_LENGTH=16385;
final Path TEST_PATH=new Path("/a");
final int RANDOM_SEED=23453;
HdfsConfiguration conf=initZeroCopyTest();
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,false);
final String CONTEXT="testZeroCopyReadOfCachedData";
conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,CONTEXT);
conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,DFSTestUtil.roundUpToMultiple(TEST_FILE_LENGTH,4096));
MiniDFSCluster cluster=null;
ByteBuffer result=null, result2=null;
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FsDatasetSpi> fsd=cluster.getDataNodes().get(0).getFSDataset();
DistributedFileSystem fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,RANDOM_SEED);
DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1);
byte original[]=DFSTestUtil.calculateFileContentsFromSeed(RANDOM_SEED,TEST_FILE_LENGTH);
FSDataInputStream fsIn=fs.open(TEST_PATH);
try {
result=fsIn.read(null,TEST_FILE_LENGTH / 2,EnumSet.noneOf(ReadOption.class));
Assert.fail("expected UnsupportedOperationException");
}
catch ( UnsupportedOperationException e) {
}
fs.addCachePool(new CachePoolInfo("pool1"));
long directiveId=fs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(TEST_PATH).setReplication((short)1).setPool("pool1").build());
int numBlocks=(int)Math.ceil((double)TEST_FILE_LENGTH / BLOCK_SIZE);
DFSTestUtil.verifyExpectedCacheUsage(DFSTestUtil.roundUpToMultiple(TEST_FILE_LENGTH,BLOCK_SIZE),numBlocks,cluster.getDataNodes().get(0).getFSDataset());
try {
result=fsIn.read(null,TEST_FILE_LENGTH,EnumSet.noneOf(ReadOption.class));
}
catch ( UnsupportedOperationException e) {
Assert.fail("expected to be able to read cached file via zero-copy");
}
Assert.assertArrayEquals(Arrays.copyOfRange(original,0,BLOCK_SIZE),byteBufferToArray(result));
FSDataInputStream fsIn2=fs.open(TEST_PATH);
try {
result2=fsIn2.read(null,TEST_FILE_LENGTH,EnumSet.noneOf(ReadOption.class));
}
catch ( UnsupportedOperationException e) {
Assert.fail("expected to be able to read cached file via zero-copy");
}
Assert.assertArrayEquals(Arrays.copyOfRange(original,0,BLOCK_SIZE),byteBufferToArray(result2));
fsIn2.releaseBuffer(result2);
fsIn2.close();
final ExtendedBlock firstBlock=DFSTestUtil.getFirstBlock(fs,TEST_PATH);
final ShortCircuitCache cache=ClientContext.get(CONTEXT,new DFSClient.Conf(conf)).getShortCircuitCache();
waitForReplicaAnchorStatus(cache,firstBlock,true,true,1);
fs.removeCacheDirective(directiveId);
waitForReplicaAnchorStatus(cache,firstBlock,false,true,1);
fsIn.releaseBuffer(result);
waitForReplicaAnchorStatus(cache,firstBlock,false,false,1);
DFSTestUtil.verifyExpectedCacheUsage(0,0,fsd);
fsIn.close();
fs.close();
cluster.shutdown();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Resolving a symlink through a FileContext must report exactly one
 * underlying AbstractFileSystem (the local one hosting both link and target).
 */
@Test(timeout=30000) public void testFileContextResolveAfs() throws IOException {
  Configuration conf = new Configuration();
  localFs = FileSystem.get(conf);
  Path localPath = new Path(TEST_ROOT_DIR_LOCAL + "/TestFileContextResolveAfs1");
  Path linkPath = localFs.makeQualified(new Path(TEST_ROOT_DIR_LOCAL, "TestFileContextResolveAfs2"));
  localFs.mkdirs(new Path(TEST_ROOT_DIR_LOCAL));
  // FIX: close the stream returned by create() instead of leaking it.
  localFs.create(localPath).close();
  fc.createSymlink(localPath, linkPath, true);
  // FIX: wildcard instead of the raw Set type.
  Set<?> afsList = fc.resolveAbstractFileSystems(linkPath);
  Assert.assertEquals(1, afsList.size());
  localFs.deleteOnExit(localPath);
  localFs.deleteOnExit(linkPath);
  localFs.close();
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Verifies which URIs resolve to the cached default FileSystem instance,
 * which get a distinct instance, and which are rejected outright.
 */
@Test public void testDefaultFsUris() throws Exception {
  final Configuration conf = new Configuration();
  conf.set("fs.defaultfs.impl", DefaultFs.class.getName());
  final URI defaultUri = URI.create("defaultfs://host");
  FileSystem.setDefaultUri(conf, defaultUri);
  final FileSystem defaultFs = FileSystem.get(conf);
  assertEquals(defaultUri, defaultFs.getUri());
  // Scheme-only, empty-authority, and matching-authority URIs all map to
  // the same cached default instance.
  for (String sameAsDefault : new String[]{"defaultfs:/", "defaultfs:///", "defaultfs://host"}) {
    assertSame(defaultFs, FileSystem.get(URI.create(sameAsDefault), conf));
  }
  // A different authority gets its own instance.
  assertNotSame(defaultFs, FileSystem.get(URI.create("defaultfs://host2"), conf));
  // A bare path falls back to the default fs.
  assertSame(defaultFs, FileSystem.get(URI.create("/"), conf));
  // Authority without a scheme is an error, regardless of the host.
  for (String authorityOnly : new String[]{"//host", "//host2"}) {
    try {
      FileSystem.get(URI.create(authorityOnly), conf);
      fail("got fs with auth but no scheme");
    } catch (Exception e) {
      assertEquals("No FileSystem for scheme: null", e.getMessage());
    }
  }
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier
/**
 * The FileSystem cache keys on the user name: identical URI and conf but
 * different users must produce distinct instances.
 */
@Test public void testUserFS() throws Exception {
  final Configuration conf = new Configuration();
  conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
  FileSystem barFs = FileSystem.get(new URI("cachedfile://a"), conf, "bar");
  FileSystem fooFs = FileSystem.get(new URI("cachedfile://a"), conf, "foo");
  assertNotSame(barFs, fooFs);
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier
/**
 * The FileSystem cache is keyed on UGI identity: same UGI hits the cache,
 * a different UGI (even with the same user name) does not, and adding a
 * token to an existing UGI does not change its cache key.
 */
@SuppressWarnings("unchecked") @Test public void testCacheForUgi() throws Exception {
  final Configuration conf = new Configuration();
  conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
  UserGroupInformation ugiA = UserGroupInformation.createRemoteUser("foo");
  UserGroupInformation ugiB = UserGroupInformation.createRemoteUser("bar");
  // FIX: parameterize PrivilegedExceptionAction (was a raw type).
  FileSystem fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>(){
    @Override public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  FileSystem fsA1 = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>(){
    @Override public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  // Same UGI instance: the cache returns the identical FileSystem.
  assertSame(fsA, fsA1);
  FileSystem fsB = ugiB.doAs(new PrivilegedExceptionAction<FileSystem>(){
    @Override public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  // Different user: different cache entry.
  assertNotSame(fsA, fsB);
  // Token stays raw: mock(Token.class) cannot carry a type argument, and
  // @SuppressWarnings covers the unchecked use.
  Token t1 = mock(Token.class);
  UserGroupInformation ugiA2 = UserGroupInformation.createRemoteUser("foo");
  fsA = ugiA2.doAs(new PrivilegedExceptionAction<FileSystem>(){
    @Override public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  // Same user name but a distinct UGI is a distinct cache key.
  assertNotSame(fsA, fsA1);
  ugiA.addToken(t1);
  fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>(){
    @Override public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  // Adding a token does not change UGI identity, so the original cache
  // entry is still returned.
  assertSame(fsA, fsA1);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * get() returns the cached instance; newInstance() must bypass the cache
 * and hand out a fresh, non-equal FileSystem every time.
 */
@Test public void testFsUniqueness() throws Exception {
  final Configuration conf = new Configuration();
  conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
  FileSystem fs1 = FileSystem.get(conf);
  FileSystem fs2 = FileSystem.get(conf);
  // FIX: assertSame instead of assertTrue(fs1 == fs2) for a clearer failure.
  assertSame(fs1, fs2);
  fs1 = FileSystem.newInstance(new URI("cachedfile://a"), conf, "bar");
  fs2 = FileSystem.newInstance(new URI("cachedfile://a"), conf, "bar");
  // FIX: split the combined assertTrue(fs1 != fs2 && !fs1.equals(fs2)).
  assertNotSame(fs1, fs2);
  assertTrue(!fs1.equals(fs2));
  fs1.close();
  fs2.close();
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier
/**
 * closeAllForUGI must evict that UGI's cached FileSystem entries so the
 * next lookup builds a fresh instance.
 */
@Test public void testCloseAllForUGI() throws Exception {
  final Configuration conf = new Configuration();
  conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
  UserGroupInformation ugiA = UserGroupInformation.createRemoteUser("foo");
  // FIX: parameterize PrivilegedExceptionAction (was a raw type).
  FileSystem fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>(){
    @Override public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  FileSystem fsA1 = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>(){
    @Override public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  // Both lookups hit the same cache entry.
  assertSame(fsA, fsA1);
  FileSystem.closeAllForUGI(ugiA);
  // After eviction, the same UGI gets a brand-new instance.
  fsA1 = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>(){
    @Override public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  assertNotSame(fsA, fsA1);
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier
/**
 * With fs.*.impl.disable.cache set, every lookup of the same URI must
 * yield a brand-new FileSystem instance.
 */
@Test public void testCacheDisabled() throws Exception {
  Configuration conf = new Configuration();
  conf.set("fs.uncachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
  conf.setBoolean("fs.uncachedfile.impl.disable.cache", true);
  final URI uri = new URI("uncachedfile://a");
  FileSystem first = FileSystem.get(uri, conf);
  FileSystem second = FileSystem.get(uri, conf);
  assertNotSame(first, second);
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier
/**
 * Default caching behavior: repeated lookups of the same URI return the
 * identical FileSystem instance.
 */
@Test public void testCacheEnabled() throws Exception {
  Configuration conf = new Configuration();
  conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
  final URI uri = new URI("cachedfile://a");
  FileSystem first = FileSystem.get(uri, conf);
  FileSystem second = FileSystem.get(uri, conf);
  assertSame(first, second);
}
APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * FileUtil.list: reports existing entries, returns an empty array for an
 * empty directory, and throws IOException for a missing one.
 */
@Test(timeout=30000) public void testListAPI() throws IOException {
  setupDirs();
  // setupDirs() seeds "partitioned" with exactly two files.
  String[] listing = FileUtil.list(partitioned);
  Assert.assertEquals("Unexpected number of pre-existing files", 2, listing.length);
  // A freshly created directory lists as empty (not null).
  File emptyDir = new File(tmp.getPath(), "test");
  emptyDir.mkdir();
  Assert.assertTrue("Failed to create test dir", emptyDir.exists());
  listing = FileUtil.list(emptyDir);
  Assert.assertEquals("New directory unexpectedly contains files", 0, listing.length);
  emptyDir.delete();
  Assert.assertFalse("Failed to delete test dir", emptyDir.exists());
  // Listing a directory that no longer exists must raise IOException.
  try {
    FileUtil.list(emptyDir);
    Assert.fail("IOException expected on list() for non-existent dir " + emptyDir.toString());
  } catch (IOException expected) {
    // expected
  }
}
APIUtilityVerifier BooleanVerifier
/**
 * fullyDelete must report success and recursively remove "del" while
 * leaving the unrelated tmp directory intact.
 */
@Test(timeout=30000) public void testFullyDelete() throws IOException {
  setupDirs();
  Assert.assertTrue(FileUtil.fullyDelete(del));
  Assert.assertFalse(del.exists());
  validateTmpDir();
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests if fullyDelete deletes
 * (a) symlink to file only and not the file pointed to by symlink.
 * (b) symlink to dir only and not the dir pointed to by symlink.
 * @throws IOException
 */
@Test(timeout=30000) public void testFullyDeleteSymlinks() throws IOException {
  setupDirs();
  // "del" starts with 5 entries, including a file symlink and a dir symlink.
  File fileLink = new File(del, LINK);
  Assert.assertEquals(5, del.list().length);
  // Deleting the file symlink removes only the link itself...
  Assert.assertTrue(FileUtil.fullyDelete(fileLink));
  Assert.assertFalse(fileLink.exists());
  Assert.assertEquals(4, del.list().length);
  // ...and the link target under tmp survives.
  validateTmpDir();
  // Same for the directory symlink.
  File dirLink = new File(del, "tmpDir");
  Assert.assertTrue(FileUtil.fullyDelete(dirLink));
  Assert.assertFalse(dirLink.exists());
  Assert.assertEquals(3, del.list().length);
  validateTmpDir();
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * stat2Paths with a default path: null statuses yield the default (or a
 * single null when the default is null too); real statuses map to their
 * paths in order, ignoring the default.
 */
@Test(timeout=30000) public void testStat2Paths2(){
  Path fallback = new Path("file://default");
  // null statuses + default: the default is returned alone.
  Path[] result = FileUtil.stat2Paths(null, fallback);
  assertEquals(1, result.length);
  assertEquals(fallback, result[0]);
  // null statuses + null default: a one-element array holding null.
  result = FileUtil.stat2Paths(null, null);
  assertTrue(result != null);
  assertEquals(1, result.length);
  assertEquals(null, result[0]);
  // Real statuses: paths come back in the statuses' order.
  Path p1 = new Path("file://foo");
  Path p2 = new Path("file://moo");
  FileStatus[] stats = {new FileStatus(3, false, 0, 0, 0, p1), new FileStatus(3, false, 0, 0, 0, p2)};
  result = FileUtil.stat2Paths(stats, fallback);
  assertEquals(2, result.length);
  assertEquals(result[0], p1);
  assertEquals(result[1], p2);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * fullyDeleteContents empties "del" but must leave the directory itself
 * in place, and not follow links into tmp.
 */
@Test(timeout=30000) public void testFullyDeleteContents() throws IOException {
  setupDirs();
  Assert.assertTrue(FileUtil.fullyDeleteContents(del));
  Assert.assertTrue(del.exists());
  Assert.assertEquals(0, del.listFiles().length);
  validateTmpDir();
}
APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * stat2Paths without a default: null maps to null, an empty array maps to
 * an empty array, and statuses map to their paths preserving order.
 */
@Test(timeout=30000) public void testStat2Paths1(){
  assertNull(FileUtil.stat2Paths(null));
  Path[] result = FileUtil.stat2Paths(new FileStatus[0]);
  assertEquals(0, result.length);
  Path p1 = new Path("file://foo");
  Path p2 = new Path("file://moo");
  FileStatus[] stats = {new FileStatus(3, false, 0, 0, 0, p1), new FileStatus(3, false, 0, 0, 0, p2)};
  result = FileUtil.stat2Paths(stats);
  assertEquals(2, result.length);
  assertEquals(result[0], p1);
  assertEquals(result[1], p2);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * FileUtil.copy via a FileSystem source: plain copy keeps the source,
 * deleteSource=true removes it, and copying a directory reproduces all of
 * its files at the destination.
 */
@Test(timeout=30000) public void testCopy5() throws IOException {
  setupDirs();
  URI uri = tmp.toURI();
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.newInstance(uri, conf);
  final String content = "some-content";
  File srcFile = createFile(tmp, "src", content);
  Path srcPath = new Path(srcFile.toURI());
  final File dest = new File(del, "dest");
  // createFile appends a platform line separator; hoist the expected
  // length instead of recomputing it (was duplicated inline twice).
  final long expectedLength =
      content.getBytes().length + System.getProperty("line.separator").getBytes().length;
  // Copy without deleting the source.
  boolean result = FileUtil.copy(fs, srcPath, dest, false, conf);
  assertTrue(result);
  assertTrue(dest.exists());
  assertEquals(expectedLength, dest.length());
  assertTrue(srcFile.exists());
  dest.delete();
  // FIX: assertFalse instead of assertTrue(!...) throughout.
  assertFalse(dest.exists());
  // Copy with deleteSource=true: the source must be gone afterwards.
  result = FileUtil.copy(fs, srcPath, dest, true, conf);
  assertTrue(result);
  assertTrue(dest.exists());
  assertEquals(expectedLength, dest.length());
  assertFalse(srcFile.exists());
  dest.delete();
  assertFalse(dest.exists());
  // Copy a whole directory with deleteSource=true.
  srcPath = new Path(partitioned.toURI());
  result = FileUtil.copy(fs, srcPath, dest, true, conf);
  assertTrue(result);
  assertTrue(dest.exists() && dest.isDirectory());
  File[] files = dest.listFiles();
  assertTrue(files != null);
  assertEquals(2, files.length);
  for (File f : files) {
    assertEquals(3 + System.getProperty("line.separator").getBytes().length, f.length());
  }
  assertFalse(partitioned.exists());
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests if fullyDelete deletes
 * (a) dangling symlink to file properly
 * (b) dangling symlink to directory properly
 * @throws IOException
 */
@Test(timeout=30000) public void testFullyDeleteDanglingSymlinks() throws IOException {
  setupDirs();
  // Remove the link targets first so the links under "del" dangle.
  Assert.assertTrue(FileUtil.fullyDelete(tmp));
  Assert.assertFalse(tmp.exists());
  File danglingFileLink = new File(del, LINK);
  Assert.assertEquals(5, del.list().length);
  // A dangling file symlink is removed like any other entry.
  Assert.assertTrue(FileUtil.fullyDelete(danglingFileLink));
  Assert.assertEquals(4, del.list().length);
  // Likewise for a dangling directory symlink.
  File danglingDirLink = new File(del, "tmpDir");
  Assert.assertTrue(FileUtil.fullyDelete(danglingDirLink));
  Assert.assertEquals(3, del.list().length);
}
APIUtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test that getDU is able to handle cycles caused due to symbolic links
 * and that directory sizes are not added to the final calculated size
 * @throws IOException
 */
@Test(timeout=30000) public void testGetDU() throws Exception {
setupDirs();
// Two 3-byte files, each followed by a platform line separator, are the
// only regular-file bytes counted; directory entries contribute nothing.
long du=FileUtil.getDU(TEST_DIR);
final long expected=2 * (3 + System.getProperty("line.separator").length());
Assert.assertEquals(expected,du);
// A non-existent path contributes zero bytes rather than failing.
final File doesNotExist=new File(tmp,"QuickBrownFoxJumpsOverTheLazyDog");
long duDoesNotExist=FileUtil.getDU(doesNotExist);
assertEquals(0,duDoesNotExist);
// For a plain file, getDU is just that file's length.
File notADirectory=new File(partitioned,"part-r-00000");
long duNotADirectoryActual=FileUtil.getDU(notADirectory);
long duNotADirectoryExpected=3 + System.getProperty("line.separator").length();
assertEquals(duNotADirectoryExpected,duNotADirectoryActual);
try {
// Make one file unreadable: its size is still counted (du3 == expected)
// because the parent directory can still be listed.
try {
FileUtil.chmod(notADirectory.getAbsolutePath(),"0000");
}
catch ( InterruptedException ie) {
// chmod should not be interrupted here; treat an interrupt as a failure.
assertNull(ie);
}
assertFalse(FileUtil.canRead(notADirectory));
final long du3=FileUtil.getDU(partitioned);
assertEquals(expected,du3);
// Make the directory itself unreadable: nothing can be listed, so the
// reported usage drops to zero.
try {
FileUtil.chmod(partitioned.getAbsolutePath(),"0000");
}
catch ( InterruptedException ie) {
assertNull(ie);
}
assertFalse(FileUtil.canRead(partitioned));
final long du4=FileUtil.getDU(partitioned);
assertEquals(0,du4);
}
finally {
// Restore permissions recursively so later tests can clean up.
FileUtil.chmod(partitioned.getAbsolutePath(),"0777",true);
}
}
APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * FileUtil.listFiles: returns existing entries, an empty array for an
 * empty directory, and throws IOException for a missing one.
 */
@Test(timeout=30000) public void testListFiles() throws IOException {
  setupDirs();
  // setupDirs() seeds "partitioned" with exactly two files.
  File[] found = FileUtil.listFiles(partitioned);
  Assert.assertEquals(2, found.length);
  // A freshly created directory yields an empty (non-null) array.
  File scratch = new File(tmp.getPath(), "test");
  scratch.mkdir();
  Assert.assertTrue("Failed to create test dir", scratch.exists());
  found = FileUtil.listFiles(scratch);
  Assert.assertEquals(0, found.length);
  scratch.delete();
  Assert.assertFalse("Failed to delete test dir", scratch.exists());
  // Listing a deleted directory must raise IOException.
  try {
    FileUtil.listFiles(scratch);
    Assert.fail("IOException expected on listFiles() for non-existent dir " + scratch.toString());
  } catch (IOException expected) {
    // expected
  }
}
APIUtilityVerifier BooleanVerifier
/**
 * createLocalTempFile must produce readable/writable files distinct from
 * the base path, for both allowAppend values.
 */
@Test(timeout=30000) public void testCreateLocalTempFile() throws IOException {
  setupDirs();
  final File baseFile = new File(tmp, "base");
  File tmp1 = FileUtil.createLocalTempFile(baseFile, "foo", false);
  File tmp2 = FileUtil.createLocalTempFile(baseFile, "foo", true);
  // Neither temp file may collide with the base file path.
  assertFalse(tmp1.getAbsolutePath().equals(baseFile.getAbsolutePath()));
  assertFalse(tmp2.getAbsolutePath().equals(baseFile.getAbsolutePath()));
  // FIX: assert each property individually (was &&-combined assertTrue,
  // which hides which file/property failed).
  assertTrue(tmp1.exists());
  assertTrue(tmp2.exists());
  assertTrue(tmp1.canWrite());
  assertTrue(tmp2.canWrite());
  assertTrue(tmp1.canRead());
  assertTrue(tmp2.canRead());
  tmp1.delete();
  tmp2.delete();
  assertFalse(tmp1.exists());
  assertFalse(tmp2.exists());
}
APIUtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * createJarWithClassPath must produce a manifest-only jar whose Class-Path
 * drops empty entries, expands the wildcard to every jar (case-insensitive
 * extension), resolves relative entries against the working dir, and keeps
 * the trailing separator on directory entries.
 */
@Test(timeout=30000) public void testCreateJarWithClassPath() throws Exception {
  Assert.assertFalse(tmp.exists());
  Assert.assertTrue(tmp.mkdirs());
  // FIX: raw List types replaced with List<File>/List<String>.
  List<File> wildcardMatches = Arrays.asList(new File(tmp, "wildcard1.jar"),
      new File(tmp, "wildcard2.jar"), new File(tmp, "wildcard3.JAR"),
      new File(tmp, "wildcard4.JAR"));
  for (File wildcardMatch : wildcardMatches) {
    Assert.assertTrue("failure creating file: " + wildcardMatch, wildcardMatch.createNewFile());
  }
  // Non-jar files must not be picked up by wildcard expansion.
  Assert.assertTrue(new File(tmp, "text.txt").createNewFile());
  Assert.assertTrue(new File(tmp, "executable.exe").createNewFile());
  Assert.assertTrue(new File(tmp, "README").createNewFile());
  String wildcardPath = tmp.getCanonicalPath() + File.separator + "*";
  String nonExistentSubdir = tmp.getCanonicalPath() + Path.SEPARATOR + "subdir" + Path.SEPARATOR;
  List<String> classPaths = Arrays.asList("", "cp1.jar", "cp2.jar", wildcardPath, "cp3.jar", nonExistentSubdir);
  String inputClassPath = StringUtils.join(File.pathSeparator, classPaths);
  String classPathJar = FileUtil.createJarWithClassPath(inputClassPath,
      new Path(tmp.getCanonicalPath()), System.getenv());
  JarFile jarFile = null;
  try {
    jarFile = new JarFile(classPathJar);
    Manifest jarManifest = jarFile.getManifest();
    Assert.assertNotNull(jarManifest);
    Attributes mainAttributes = jarManifest.getMainAttributes();
    Assert.assertNotNull(mainAttributes);
    Assert.assertTrue(mainAttributes.containsKey(Attributes.Name.CLASS_PATH));
    String classPathAttr = mainAttributes.getValue(Attributes.Name.CLASS_PATH);
    Assert.assertNotNull(classPathAttr);
    // Build the expected Class-Path entries mirroring the rules above.
    List<String> expectedClassPaths = new ArrayList<String>();
    for (String classPath : classPaths) {
      if (classPath.length() == 0) {
        continue;
      }
      if (wildcardPath.equals(classPath)) {
        for (File wildcardMatch : wildcardMatches) {
          expectedClassPaths.add(wildcardMatch.toURI().toURL().toExternalForm());
        }
      } else {
        File fileCp = null;
        if (!new Path(classPath).isAbsolute()) {
          fileCp = new File(tmp, classPath);
        } else {
          fileCp = new File(classPath);
        }
        if (nonExistentSubdir.equals(classPath)) {
          // Directory entries keep their trailing separator.
          expectedClassPaths.add(fileCp.toURI().toURL().toExternalForm() + Path.SEPARATOR);
        } else {
          expectedClassPaths.add(fileCp.toURI().toURL().toExternalForm());
        }
      }
    }
    // Manifest ordering is not significant; compare as sorted lists.
    List<String> actualClassPaths = Arrays.asList(classPathAttr.split(" "));
    Collections.sort(expectedClassPaths);
    Collections.sort(actualClassPaths);
    Assert.assertEquals(expectedClassPaths, actualClassPaths);
  } finally {
    if (jarFile != null) {
      try {
        jarFile.close();
      } catch (IOException e) {
        LOG.warn("exception closing jarFile: " + classPathJar, e);
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * -get with a non-existent source must print a proper "No such file"
 * message rather than "get: null".
 */
@Test(timeout=30000) public void testGetWithInvalidSourcePathShouldNotDisplayNullInConsole() throws Exception {
Configuration conf=new Configuration();
FsShell shell=new FsShell();
shell.setConf(conf);
// Capture stderr so the shell's error output can be asserted on.
final ByteArrayOutputStream bytes=new ByteArrayOutputStream();
final PrintStream out=new PrintStream(bytes);
final PrintStream oldErr=System.err;
System.setErr(out);
final String results;
try {
// Fresh working dir; both -get arguments point at paths that must not exist.
Path tdir=new Path(TEST_ROOT_DIR,"notNullCopy");
fileSys.delete(tdir,true);
fileSys.mkdirs(tdir);
String[] args=new String[3];
args[0]="-get";
args[1]=new Path(tdir.toUri().getPath(),"/invalidSrc").toString();
args[2]=new Path(tdir.toUri().getPath(),"/invalidDst").toString();
assertTrue("file exists",!fileSys.exists(new Path(args[1])));
assertTrue("file exists",!fileSys.exists(new Path(args[2])));
int run=shell.run(args);
results=bytes.toString();
// Exit code 1 and a message naming the missing source -- never "get: null".
assertEquals("Return code should be 1",1,run);
assertTrue(" Null is coming when source path is invalid. ",!results.contains("get: null"));
assertTrue(" Not displaying the intended message ",results.contains("get: `" + args[1] + "': No such file or directory"));
}
finally {
// Always restore the real stderr, even if an assertion failed.
IOUtils.closeStream(out);
System.setErr(oldErr);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * -rm on a glob that matches nothing must exit with code 1 and print a
 * "No such file or directory" message naming the glob.
 */
@Test(timeout=30000) public void testRmWithNonexistentGlob() throws Exception {
  Configuration conf = new Configuration();
  FsShell shell = new FsShell();
  shell.setConf(conf);
  // Capture stderr so the error message can be inspected.
  final ByteArrayOutputStream captured = new ByteArrayOutputStream();
  final PrintStream capturedErr = new PrintStream(captured);
  final PrintStream savedErr = System.err;
  System.setErr(capturedErr);
  try {
    assertEquals(1, shell.run(new String[]{"-rm", "nomatch*"}));
    assertTrue(captured.toString().contains("rm: `nomatch*': No such file or directory"));
  } finally {
    IOUtils.closeStream(capturedErr);
    System.setErr(savedErr);
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Glob curly-brace alternation: simple sets, nested sets, slashes inside
 * braces, empty alternatives, literal closing braces, and the unmatched
 * "{" error case.
 */
@Test public void pTestCurlyBracket() throws IOException {
Path[] matchedPath;
String[] files;
// Simple alternation {abc,jh} followed by two single-char wildcards.
try {
files=new String[]{USER_DIR + "/a.abcxx",USER_DIR + "/a.abxy",USER_DIR + "/a.hlp",USER_DIR + "/a.jhyy"};
matchedPath=prepareTesting(USER_DIR + "/a.{abc,jh}??",files);
assertEquals(matchedPath.length,2);
assertEquals(matchedPath[0],path[0]);
assertEquals(matchedPath[1],path[3]);
}
finally {
cleanupDFS();
}
// Nested alternation: {ab{c,d},jh} expands to abc, abd, jh.
try {
files=new String[]{USER_DIR + "/a.abcxx",USER_DIR + "/a.abdxy",USER_DIR + "/a.hlp",USER_DIR + "/a.jhyy"};
matchedPath=prepareTesting(USER_DIR + "/a.{ab{c,d},jh}??",files);
assertEquals(matchedPath.length,3);
assertEquals(matchedPath[0],path[0]);
assertEquals(matchedPath[1],path[1]);
assertEquals(matchedPath[2],path[3]);
}
finally {
cleanupDFS();
}
// Alternatives may contain path separators: {a/b,c/d}.
try {
files=new String[]{USER_DIR + "/a/b",USER_DIR + "/a/d",USER_DIR + "/c/b",USER_DIR + "/c/d"};
matchedPath=prepareTesting(USER_DIR + "/{a/b,c/d}",files);
assertEquals(matchedPath.length,2);
assertEquals(matchedPath[0],path[0]);
assertEquals(matchedPath[1],path[3]);
}
finally {
cleanupDFS();
}
// Same, with absolute paths inside the braces.
try {
files=new String[]{"/a/b","/a/d","/c/b","/c/d"};
matchedPath=prepareTesting("{/a/b,/c/d}",files);
assertEquals(matchedPath.length,2);
assertEquals(matchedPath[0],path[0]);
assertEquals(matchedPath[1],path[3]);
}
finally {
cleanupDFS();
}
// Literal "}" before a brace group, including empty alternatives,
// and finally the illegal unmatched "{" pattern.
try {
files=new String[]{USER_DIR + "/}bc",USER_DIR + "/}c"};
matchedPath=prepareTesting(USER_DIR + "/}{a,b}c",files);
assertEquals(matchedPath.length,1);
assertEquals(matchedPath[0],path[0]);
matchedPath=prepareTesting(USER_DIR + "/}{b}c",files);
assertEquals(matchedPath.length,1);
assertEquals(matchedPath[0],path[0]);
matchedPath=prepareTesting(USER_DIR + "/}{}bc",files);
assertEquals(matchedPath.length,1);
assertEquals(matchedPath[0],path[0]);
matchedPath=prepareTesting(USER_DIR + "/}{,}bc",files);
assertEquals(matchedPath.length,1);
assertEquals(matchedPath[0],path[0]);
matchedPath=prepareTesting(USER_DIR + "/}{b,}c",files);
assertEquals(matchedPath.length,2);
assertEquals(matchedPath[0],path[0]);
assertEquals(matchedPath[1],path[1]);
matchedPath=prepareTesting(USER_DIR + "/}{,b}c",files);
assertEquals(matchedPath.length,2);
assertEquals(matchedPath[0],path[0]);
assertEquals(matchedPath[1],path[1]);
matchedPath=prepareTesting(USER_DIR + "/}{ac,?}",files);
assertEquals(matchedPath.length,1);
assertEquals(matchedPath[0],path[1]);
// An unmatched "{" must be rejected as an illegal pattern.
boolean hasException=false;
try {
prepareTesting(USER_DIR + "}{bc",files);
}
catch ( IOException e) {
assertTrue(e.getMessage().startsWith("Illegal file pattern:"));
hasException=true;
}
assertTrue(hasException);
}
finally {
cleanupDFS();
}
}
APIUtilityVerifier EqualityVerifier
/**
 * Glob "a.*" matches every name beginning with "a." but not hidden files.
 */
@Test public void pTestClosure2() throws IOException {
  try {
    String[] names={USER_DIR + "/a.",USER_DIR + "/a.txt",USER_DIR + "/a.old.java",USER_DIR + "/.java"};
    Path[] globbed=prepareTesting(USER_DIR + "/a.*",names);
    assertEquals(globbed.length,3);
    // Matches come back sorted by name: a., a.old.java, a.txt.
    int[] want={0,2,1};
    for (int i=0;i<want.length;i++) {
      assertEquals(globbed[i],path[want[i]]);
    }
  } finally {
    cleanupDFS();
  }
}
APIUtilityVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * A backslash-escaped "[" in a glob must match that character literally.
 */
@Test public void pTestEscape() throws IOException {
  // Backslash escaping does not apply on Windows paths; skip there.
  org.junit.Assume.assumeTrue(!Path.WINDOWS);
  try {
    String[] names={USER_DIR + "/ab\\[c.d"};
    Path[] globbed=prepareTesting(USER_DIR + "/ab\\[c.d",names);
    assertEquals(globbed.length,1);
    assertEquals(globbed[0],path[0]);
  } finally {
    cleanupDFS();
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * Glob "a*" matches names beginning with 'a' (a, abc, abc.p) but not bacd.
 */
@Test public void pTestClosure1() throws IOException {
  try {
    String[] names={USER_DIR + "/a",USER_DIR + "/abc",USER_DIR + "/abc.p",USER_DIR + "/bacd"};
    Path[] globbed=prepareTesting(USER_DIR + "/a*",names);
    assertEquals(globbed.length,3);
    int[] want={0,1,2};
    for (int i=0;i<want.length;i++) {
      assertEquals(globbed[i],path[want[i]]);
    }
  } finally {
    cleanupDFS();
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * A "*" directory component matches every directory holding "file1".
 */
@Test public void pTestClosure4() throws IOException {
  try {
    String[] names={USER_DIR + "/dir1/file1",USER_DIR + "/dir2/file2",USER_DIR + "/dir3/file1"};
    Path[] globbed=prepareTesting(USER_DIR + "/*/file1",names);
    assertEquals(globbed.length,2);
    int[] want={0,2};
    for (int i=0;i<want.length;i++) {
      assertEquals(globbed[i],path[want[i]]);
    }
  } finally {
    cleanupDFS();
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * Java-regex metacharacters in a file name ("($.|+)") must be treated
 * literally by the glob, not as regex operators.
 */
@Test public void pTestJavaRegexSpecialChars() throws IOException {
  try {
    String[] names={USER_DIR + "/($.|+)bc",USER_DIR + "/abc"};
    Path[] globbed=prepareTesting(USER_DIR + "/($.|+)*",names);
    assertEquals(globbed.length,1);
    assertEquals(globbed[0],path[0]);
  } finally {
    cleanupDFS();
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * Glob "a*x" matches any name starting with 'a' and ending with 'x'.
 */
@Test public void pTestClosure3() throws IOException {
  try {
    String[] names={USER_DIR + "/a.txt.x",USER_DIR + "/ax",USER_DIR + "/ab37x",USER_DIR + "/bacd"};
    Path[] globbed=prepareTesting(USER_DIR + "/a*x",names);
    assertEquals(globbed.length,3);
    // Sorted result order: a.txt.x, ab37x, ax.
    int[] want={0,2,1};
    for (int i=0;i<want.length;i++) {
      assertEquals(globbed[i],path[want[i]]);
    }
  } finally {
    cleanupDFS();
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * A pattern with no wildcards matches exactly the literal file name.
 */
@Test public void pTestLiteral() throws IOException {
  try {
    String[] names={USER_DIR + "/a2c",USER_DIR + "/abc.d"};
    Path[] globbed=prepareTesting(USER_DIR + "/abc.d",names);
    assertEquals(globbed.length,1);
    assertEquals(globbed[0],path[1]);
  } finally {
    cleanupDFS();
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * Negated character class [^a-cg-z0-9]: only a.d and a.e survive.
 */
@Test public void pTestSetExcl() throws IOException {
  try {
    String[] names={USER_DIR + "/a.d",USER_DIR + "/a.e",USER_DIR + "/a.0",USER_DIR + "/a.h"};
    Path[] globbed=prepareTesting(USER_DIR + "/a.[^a-cg-z0-9]",names);
    assertEquals(globbed.length,2);
    int[] want={0,1};
    for (int i=0;i<want.length;i++) {
      assertEquals(globbed[i],path[want[i]]);
    }
  } finally {
    cleanupDFS();
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * Mixed pattern: "?" in a dir name, "*" dir component, a character set,
 * and brace alternation all combined -- only /user/dd/a.hxy matches.
 */
@Test public void pTestCombination() throws IOException {
  try {
    String[] names={"/user/aa/a.c","/user/bb/a.cpp","/user1/cc/b.hlp","/user/dd/a.hxy"};
    Path[] globbed=prepareTesting("/use?/*/a.[ch]{lp,xy}",names);
    assertEquals(globbed.length,1);
    assertEquals(globbed[0],path[3]);
  } finally {
    cleanupDFS();
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * A PathFilter applied on top of a glob keeps only the filtered entry.
 */
@Test public void testPathFilter() throws IOException {
  try {
    String[] names={USER_DIR + "/a",USER_DIR + "/a/b"};
    // The regex filter admits only ".../a/b".
    Path[] globbed=prepareTesting(USER_DIR + "/*/*",names,new RegexPathFilter("^.*" + Pattern.quote(USER_DIR) + "/a/b"));
    assertEquals(globbed.length,1);
    assertEquals(globbed[0],path[1]);
  } finally {
    cleanupDFS();
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * Character-class range [d-fm]: a.d, a.e and a.f match; a.h does not.
 */
@Test public void pTestRange() throws IOException {
  try {
    String[] names={USER_DIR + "/a.d",USER_DIR + "/a.e",USER_DIR + "/a.f",USER_DIR + "/a.h"};
    Path[] globbed=prepareTesting(USER_DIR + "/a.[d-fm]",names);
    assertEquals(globbed.length,3);
    int[] want={0,1,2};
    for (int i=0;i<want.length;i++) {
      assertEquals(globbed[i],path[want[i]]);
    }
  } finally {
    cleanupDFS();
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * "?" matches exactly one character: abc, a2c and a.c match "a?c";
 * abcd does not.
 */
@Test public void pTestAny() throws IOException {
  try {
    String[] names={USER_DIR + "/abc",USER_DIR + "/a2c",USER_DIR + "/a.c",USER_DIR + "/abcd"};
    Path[] globbed=prepareTesting(USER_DIR + "/a?c",names);
    assertEquals(globbed.length,3);
    // Sorted result order: a.c, a2c, abc.
    int[] want={2,1,0};
    for (int i=0;i<want.length;i++) {
      assertEquals(globbed[i],path[want[i]]);
    }
  } finally {
    cleanupDFS();
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * A PathFilter still applies when the last glob component is fixed:
 * both .../a/b and .../c/b match "*&#47;b", the filter keeps only .../a/b.
 */
@Test public void testPathFilterWithFixedLastComponent() throws IOException {
  try {
    String[] names={USER_DIR + "/a",USER_DIR + "/a/b",USER_DIR + "/c",USER_DIR + "/c/b"};
    Path[] globbed=prepareTesting(USER_DIR + "/*/b",names,new RegexPathFilter("^.*" + Pattern.quote(USER_DIR) + "/a/b"));
    assertEquals(globbed.length,1);
    assertEquals(globbed[0],path[1]);
  } finally {
    cleanupDFS();
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * "*&#47;file1" requires an intermediate directory: the top-level file1
 * must not match.
 */
@Test public void pTestClosure5() throws IOException {
  try {
    String[] names={USER_DIR + "/dir1/file1",USER_DIR + "/file1"};
    Path[] globbed=prepareTesting(USER_DIR + "/*/file1",names);
    assertEquals(globbed.length,1);
    assertEquals(globbed[0],path[0]);
  } finally {
    cleanupDFS();
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * Character set plus two "?" wildcards: "a.[ch]??" matches a.cpp, a.hlp
 * and a.hxy, but not the two-letter-short a.c.
 */
@Test public void pTestSet() throws IOException {
  try {
    String[] names={USER_DIR + "/a.c",USER_DIR + "/a.cpp",USER_DIR + "/a.hlp",USER_DIR + "/a.hxy"};
    Path[] globbed=prepareTesting(USER_DIR + "/a.[ch]??",names);
    assertEquals(globbed.length,3);
    int[] want={1,2,3};
    for (int i=0;i<want.length;i++) {
      assertEquals(globbed[i],path[want[i]]);
    }
  } finally {
    cleanupDFS();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A har URI carrying userinfo and an explicit port must survive
 * makeQualified() byte-for-byte.
 */
@Test public void testMakeQualifiedPath() throws Exception {
  String harPathWithUserinfo = "har://file-user:passwd@localhost:80"
      + harPath.toUri().getPath().toString();
  Path path = new Path(harPathWithUserinfo);
  Path qualifiedPath = path.getFileSystem(conf).makeQualified(path);
  // FIX: assertEquals instead of assertTrue(msg, a.equals(b)) -- it reports
  // both expected and actual values automatically on failure.
  assertEquals(harPathWithUserinfo, qualifiedPath.toString());
}
APIUtilityVerifier EqualityVerifier
/**
 * Test createHardLinkMult() with empty list of files.
 * We use an extended version of the method call, that
 * returns the number of System exec calls made, which should
 * be zero in this case.
 */
@Test public void testCreateHardLinkMultEmptyList() throws IOException {
  // No filenames means no link commands are executed at all.
  String[] nothing = {};
  assertEquals(0, createHardLinkMult(src, nothing, tgt_mult, getMaxAllowedCmdArgLength()));
  // The fixture directories must be untouched.
  validateSetup();
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test createHardLinkMult(), again, this time with the "too long list"
 * case where the total size of the command line arguments exceed the
 * allowed maximum. In this case, the list should be automatically
 * broken up into chunks, each chunk no larger than the max allowed.
 * We use an extended version of the method call, specifying the
 * size limit explicitly, to simulate the "too long" list with a
 * relatively short list.
 */
@Test public void testCreateHardLinkMultOversizeAndEmpty() throws IOException {
// Give all three source files names of identical length so the per-file
// argument size is uniform and the chunk arithmetic below is exact.
String name1="x11111111";
String name2="x22222222";
String name3="x33333333";
File x1_long=new File(src,name1);
File x2_long=new File(src,name2);
File x3_long=new File(src,name3);
x1.renameTo(x1_long);
x2.renameTo(x2_long);
x3.renameTo(x3_long);
assertTrue(x1_long.exists());
assertTrue(x2_long.exists());
assertTrue(x3_long.exists());
assertFalse(x1.exists());
assertFalse(x2.exists());
assertFalse(x3.exists());
int callCount;
String[] emptyList={};
String[] fileNames=src.list();
// Fixed command-line overhead, excluding the per-file arguments.
int overhead=getLinkMultArgLength(src,emptyList,tgt_mult);
// Room for ~2.5 filenames per command: 3 files then need 2 exec calls.
int maxLength=overhead + (int)(2.5 * (float)(1 + name1.length()));
callCount=createHardLinkMult(src,fileNames,tgt_mult,maxLength);
assertEquals(2,callCount);
String[] tgt_multNames=tgt_mult.list();
Arrays.sort(fileNames);
Arrays.sort(tgt_multNames);
assertArrayEquals(fileNames,tgt_multNames);
// Reset the target directory for the second round.
FileUtil.fullyDelete(tgt_mult);
assertFalse(tgt_mult.exists());
tgt_mult.mkdirs();
assertTrue(tgt_mult.exists() && tgt_mult.list().length == 0);
// Room for only half a filename per command: each file needs its own call.
maxLength=overhead + (int)(0.5 * (float)(1 + name1.length()));
callCount=createHardLinkMult(src,fileNames,tgt_mult,maxLength);
assertEquals(3,callCount);
tgt_multNames=tgt_mult.list();
Arrays.sort(fileNames);
Arrays.sort(tgt_multNames);
assertArrayEquals(fileNames,tgt_multNames);
}
APIUtilityVerifier BooleanVerifier
/**
 * Two buffer dirs. The first dir exists & is on a read-only disk;
 * The second dir exists & is RW
 * @throws Exception
 */
@Test(timeout=30000) public void testROBufferDirAndRWBufferDir() throws Exception {
// The read-only permission trick below does not apply on Windows.
if (isWindows) return;
String dir1=buildBufferDir(ROOT,1);
String dir2=buildBufferDir(ROOT,2);
try {
conf.set(CONTEXT,dir1 + "," + dir2);
// Only dir2 is actually created; the buffer root is then made read-only
// so dir1 can never come into existence.
assertTrue(localFs.mkdirs(new Path(dir2)));
BUFFER_ROOT.setReadOnly();
// Both allocations must land in dir2; checked twice to confirm the
// allocator keeps choosing the writable dir.
validateTempDirCreation(dir2);
validateTempDirCreation(dir2);
}
finally {
// Restore write permission before removing the buffer dirs.
Shell.execCommand(Shell.getSetPermissionCommand("u+w",false,BUFFER_DIR_ROOT));
rmBufferDirs();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Create TRIALS temp files across two buffer dirs and verify that every
 * single one was placed in one of the two configured directories.
 */
@Test(timeout=30000) public void testCreateManyFiles() throws Exception {
  if (isWindows) {
    return;
  }
  final String dirA=buildBufferDir(ROOT,5);
  final String dirB=buildBufferDir(ROOT,6);
  try {
    conf.set(CONTEXT,dirA + "," + dirB);
    assertTrue(localFs.mkdirs(new Path(dirA)));
    assertTrue(localFs.mkdirs(new Path(dirB)));
    // Precompute the two possible path prefixes once, outside the loop.
    final String prefixA=new Path(dirA,FILENAME).toUri().getPath();
    final String prefixB=new Path(dirB,FILENAME).toUri().getPath();
    int hitsA=0;
    int hitsB=0;
    for (int trial=0; trial < TRIALS; trial++) {
      final File tmp=createTempFile();
      final String tmpPath=tmp.getPath();
      if (tmpPath.startsWith(prefixA)) {
        hitsA++;
      }
      else if (tmpPath.startsWith(prefixB)) {
        hitsB++;
      }
      tmp.delete();
    }
    // Every allocation must have gone to one of the two buffer dirs.
    assertTrue(hitsA + hitsB == TRIALS);
  }
  finally {
    rmBufferDirs();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * Test that {@link LocalDirAllocator#getAllLocalPathsToRead(String,Configuration)}
 * returns correct filenames and "file" schema, that the returned iterable
 * is single-shot, and that its iterator is read-only.
 * @throws IOException
 */
@Test(timeout=30000) public void testGetAllLocalPathsToRead() throws IOException {
  assumeTrue(!isWindows);
  String dir0=buildBufferDir(ROOT,0);
  String dir1=buildBufferDir(ROOT,1);
  try {
    conf.set(CONTEXT,dir0 + "," + dir1);
    assertTrue(localFs.mkdirs(new Path(dir0)));
    assertTrue(localFs.mkdirs(new Path(dir1)));
    // Place a copy of FILENAME in each dir so two paths are readable.
    localFs.create(new Path(dir0 + Path.SEPARATOR + FILENAME));
    localFs.create(new Path(dir1 + Path.SEPARATOR + FILENAME));
    // Parameterized types instead of raw Iterable/Iterator.
    final Iterable<Path> pathIterable=dirAllocator.getAllLocalPathsToRead(FILENAME,conf);
    int count=0;
    for ( final Path p : pathIterable) {
      count++;
      assertEquals(FILENAME,p.getName());
      assertEquals("file",p.getFileSystem(conf).getUri().getScheme());
    }
    assertEquals(2,count);
    // The iterable is single-shot: a second traversal must be exhausted.
    try {
      Path p=pathIterable.iterator().next();
      // fail() instead of the old assertFalse(msg, true) idiom.
      fail("NoSuchElementException must be thrown, but returned [" + p + "] instead.");
    }
    catch ( NoSuchElementException nsee) {
      // expected
    }
    // The iterator must not support element removal.
    final Iterable<Path> pathIterable2=dirAllocator.getAllLocalPathsToRead(FILENAME,conf);
    final Iterator<Path> it=pathIterable2.iterator();
    try {
      it.remove();
      fail("UnsupportedOperationException must be thrown by remove()");
    }
    catch ( UnsupportedOperationException uoe) {
      // expected
    }
  }
  finally {
    Shell.execCommand(new String[]{"chmod","u+w",BUFFER_DIR_ROOT});
    rmBufferDirs();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier
/**
 * Test no side effect files are left over. After creating a temp
 * file, remove both the temp file and its parent. Verify that
 * no files or directories are left over as can happen when File objects
 * are mistakenly created from fully qualified path strings.
 * @throws IOException
 */
@Test(timeout=30000) public void testNoSideEffects() throws IOException {
  assumeTrue(!isWindows);
  final String bufferDir=buildBufferDir(ROOT,0);
  try {
    conf.set(CONTEXT,bufferDir);
    final File tmp=dirAllocator.createTmpFileForWrite(FILENAME,-1,conf);
    // Remove the temp file, then its parent directory...
    assertTrue(tmp.delete());
    assertTrue(tmp.getParentFile().delete());
    // ...after which the configured buffer dir must be completely gone.
    assertFalse(new File(bufferDir).exists());
  }
  finally {
    Shell.execCommand(Shell.getSetPermissionCommand("u+w",false,BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * Test getLocalPathToRead() returns correct filename and "file" schema.
 * @throws IOException
 */
@Test(timeout=30000) public void testGetLocalPathToRead() throws IOException {
  assumeTrue(!isWindows);
  final String bufferDir=buildBufferDir(ROOT,0);
  try {
    conf.set(CONTEXT,bufferDir);
    assertTrue(localFs.mkdirs(new Path(bufferDir)));
    final File written=dirAllocator.createTmpFileForWrite(FILENAME,SMALL_FILE_SIZE,conf);
    // The allocator should hand back the file it just created, via "file" scheme.
    final Path readPath=dirAllocator.getLocalPathToRead(written.getName(),conf);
    assertEquals(written.getName(),readPath.getName());
    assertEquals("file",readPath.getFileSystem(conf).getUri().getScheme());
  }
  finally {
    Shell.execCommand(Shell.getSetPermissionCommand("u+w",false,BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Two buffer dirs, both existing and writable. Later the second one
 * becomes read-only, after which all allocations must land in the first.
 * @throws Exception
 */
@Test(timeout=30000) public void testRWBufferDirBecomesRO() throws Exception {
  if (isWindows) {
    return;
  }
  final String dirA=buildBufferDir(ROOT,3);
  final String dirB=buildBufferDir(ROOT,4);
  try {
    conf.set(CONTEXT,dirA + "," + dirB);
    assertTrue(localFs.mkdirs(new Path(dirA)));
    assertTrue(localFs.mkdirs(new Path(dirB)));
    // First allocation fixes the allocator's starting directory.
    createTempFile(SMALL_FILE_SIZE);
    // The next allocation goes to whichever directory is next in rotation.
    final int nextDirIdx=(dirAllocator.getCurrentDirectoryIndex() == 0) ? 3 : 4;
    validateTempDirCreation(buildBufferDir(ROOT,nextDirIdx));
    // Once dirB is read-only, subsequent allocations must use dirA.
    new File(new Path(dirB).toUri().getPath()).setReadOnly();
    validateTempDirCreation(dirA);
    validateTempDirCreation(dirA);
  }
  finally {
    rmBufferDirs();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Two buffer dirs. The first dir does not exist & is on a read-only disk;
 * the second dir exists & is RW.
 * getLocalPathForWrite with the default (checked) overload should create the
 * parent directory. With the boolean flag set to false, the directory must
 * NOT be created.
 * @throws IOException
 */
@Test(timeout=30000) public void testLocalPathForWriteDirCreation() throws IOException {
  String dir0=buildBufferDir(ROOT,0);
  String dir1=buildBufferDir(ROOT,1);
  try {
    conf.set(CONTEXT,dir0 + "," + dir1);
    assertTrue(localFs.mkdirs(new Path(dir1)));
    BUFFER_ROOT.setReadOnly();
    // Default overload: the parent directory must be created.
    Path p1=dirAllocator.getLocalPathForWrite("p1/x",SMALL_FILE_SIZE,conf);
    assertTrue(localFs.getFileStatus(p1.getParent()).isDirectory());
    // With the flag false, the parent must NOT be created.
    Path p2=dirAllocator.getLocalPathForWrite("p2/x",SMALL_FILE_SIZE,conf,false);
    try {
      localFs.getFileStatus(p2.getParent());
      // The original test passed silently when no exception was thrown,
      // which would hide a wrongly-created directory.
      fail("Expected FileNotFoundException: parent of " + p2 + " should not have been created");
    }
    catch ( FileNotFoundException e) {
      // expected: the parent directory does not exist
    }
  }
  finally {
    Shell.execCommand(Shell.getSetPermissionCommand("u+w",false,BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}
APIUtilityVerifier BooleanVerifier
/**
 * Two buffer dirs. The first dir does not exist & is on a read-only disk;
 * the second dir exists & is RW. Allocations must always use the second.
 * @throws Exception
 */
@Test(timeout=30000) public void test0() throws Exception {
  if (isWindows) {
    return;
  }
  final String missingRoDir=buildBufferDir(ROOT,0);
  final String writableDir=buildBufferDir(ROOT,1);
  try {
    conf.set(CONTEXT,missingRoDir + "," + writableDir);
    assertTrue(localFs.mkdirs(new Path(writableDir)));
    BUFFER_ROOT.setReadOnly();
    // Both allocations should be directed to the writable directory.
    validateTempDirCreation(writableDir);
    validateTempDirCreation(writableDir);
  }
  finally {
    Shell.execCommand(Shell.getSetPermissionCommand("u+w",false,BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier
/**
 * Verify that reportChecksumFailure() moves a corrupted data file and its
 * checksum file out of their directory into a "bad files" directory, and
 * that the moved copies preserve the original file lengths.
 */
@Test(timeout=10000) public void testReportChecksumFailure() throws IOException {
base.mkdirs();
assertTrue(base.exists() && base.isDirectory());
final File dir1=new File(base,"dir1");
final File dir2=new File(dir1,"dir2");
dir2.mkdirs();
assertTrue(dir2.exists() && FileUtil.canWrite(dir2));
// Create a data file and let the checksum filesystem derive its .crc path.
final String dataFileName="corruptedData";
final Path dataPath=new Path(new File(dir2,dataFileName).toURI());
final Path checksumPath=fileSys.getChecksumFile(dataPath);
final FSDataOutputStream fsdos=fileSys.create(dataPath);
try {
fsdos.writeUTF("foo");
}
finally {
fsdos.close();
}
// Both the data file and its checksum file must exist and be non-empty.
assertTrue(fileSys.pathToFile(dataPath).exists());
final long dataFileLength=fileSys.getFileStatus(dataPath).getLen();
assertTrue(dataFileLength > 0);
assertTrue(fileSys.pathToFile(checksumPath).exists());
final long checksumFileLength=fileSys.getFileStatus(checksumPath).getLen();
assertTrue(checksumFileLength > 0);
// NOTE(review): base is made non-writable before reporting the failure —
// presumably to show the move works without write access to the grandparent;
// confirm the intent before changing.
FileUtil.setWritable(base,false);
FSDataInputStream dataFsdis=fileSys.open(dataPath);
FSDataInputStream checksumFsdis=fileSys.open(checksumPath);
// reportChecksumFailure should return false (no retry) and remove both
// files from their original location.
boolean retryIsNecessary=fileSys.reportChecksumFailure(dataPath,dataFsdis,0,checksumFsdis,0);
assertTrue(!retryIsNecessary);
assertTrue(!fileSys.pathToFile(dataPath).exists());
assertTrue(!fileSys.pathToFile(checksumPath).exists());
// dir1 should now contain exactly one entry besides "dir2": the directory
// the bad files were moved into.
File[] dir1files=dir1.listFiles(new FileFilter(){
@Override public boolean accept( File pathname){
return pathname != null && !pathname.getName().equals("dir2");
}
}
);
assertTrue(dir1files != null);
assertTrue(dir1files.length == 1);
File badFilesDir=dir1files[0];
File[] badFiles=badFilesDir.listFiles();
assertTrue(badFiles != null);
assertTrue(badFiles.length == 2);
// The moved data and checksum files must both be present with their
// original lengths intact.
boolean dataFileFound=false;
boolean checksumFileFound=false;
for ( File badFile : badFiles) {
if (badFile.getName().startsWith(dataFileName)) {
assertTrue(dataFileLength == badFile.length());
dataFileFound=true;
}
else if (badFile.getName().contains(dataFileName + ".crc")) {
assertTrue(checksumFileLength == badFile.length());
checksumFileFound=true;
}
}
assertTrue(dataFileFound);
assertTrue(checksumFileFound);
}
APIUtilityVerifier InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * On Windows, listStatus() of a path given without a drive specifier must
 * still report child paths consistent with the input form.
 */
@Test public void testListStatusReturnConsistentPathOnWindows() throws IOException {
  assumeTrue(Shell.WINDOWS);
  // Drop the drive specifier (e.g. "C:") to get the no-drive form of the root.
  String dirNoDriveSpec=TEST_ROOT_DIR;
  if (dirNoDriveSpec.charAt(1) == ':') {
    dirNoDriveSpec=dirNoDriveSpec.substring(2);
  }
  final File subDir=new File(dirNoDriveSpec,"foo");
  subDir.mkdirs();
  final FileStatus[] stats=fileSys.listStatus(new Path(dirNoDriveSpec));
  assertEquals("Unexpected number of stats",1,stats.length);
  assertEquals("Bad path from stat",new Path(subDir.getPath()).toUri().getPath(),stats[0].getPath().toUri().getPath());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the capability of setting the working directory: relative paths for
 * mkdir, copy, rename and open must all resolve against it.
 */
@Test(timeout=10000) public void testWorkingDirectory() throws IOException {
  Path origDir=fileSys.getWorkingDirectory();
  Path subdir=new Path(TEST_ROOT_DIR,"new");
  try {
    // Create a fresh directory and make it the working directory.
    assertTrue(!fileSys.exists(subdir));
    assertTrue(fileSys.mkdirs(subdir));
    assertTrue(fileSys.isDirectory(subdir));
    fileSys.setWorkingDirectory(subdir);
    // Relative mkdir/delete should now resolve against subdir.
    Path dir1=new Path("dir1");
    assertTrue(fileSys.mkdirs(dir1));
    assertTrue(fileSys.isDirectory(dir1));
    fileSys.delete(dir1,true);
    assertTrue(!fileSys.exists(dir1));
    Path file1=new Path("file1");
    Path file2=new Path("sub/file2");
    String contents=writeFile(fileSys,file1,1);
    // Copy and rename with relative paths.
    fileSys.copyFromLocalFile(file1,file2);
    assertTrue(fileSys.exists(file1));
    assertTrue(fileSys.isFile(file1));
    cleanupFile(fileSys,file2);
    fileSys.copyToLocalFile(file1,file2);
    cleanupFile(fileSys,file2);
    fileSys.rename(file1,file2);
    assertTrue(!fileSys.exists(file1));
    assertTrue(fileSys.exists(file2));
    fileSys.rename(file2,file1);
    // Close the stream even if an assertion fails; the original leaked it
    // whenever the content check threw.
    InputStream stm=fileSys.open(file1);
    try {
      byte[] buffer=new byte[3];
      int bytesRead=stm.read(buffer,0,3);
      assertEquals(contents,new String(buffer,0,bytesRead));
    }
    finally {
      stm.close();
    }
  }
  finally {
    // Always restore the original working directory for later tests.
    fileSys.setWorkingDirectory(origDir);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * resolvePath() must strip a URI fragment ("#glacier") from the path it
 * returns.
 */
@Test public void testStripFragmentFromPath() throws Exception {
  final FileSystem localFs=FileSystem.getLocal(new Configuration());
  final Path qualified=TEST_PATH.makeQualified(localFs.getUri(),localFs.getWorkingDirectory());
  // Append a URI fragment to the fully qualified path.
  final Path withFragment=new Path(new URI(qualified.toString() + "#glacier"));
  FileSystemTestHelper.createFile(localFs,withFragment);
  final Path resolved=localFs.resolvePath(withFragment);
  assertEquals("resolvePath did not strip fragment from Path",qualified,resolved);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier
/**
 * Stat on a real directory reports a directory; stat on a symlink to it
 * (link not followed) does not.
 */
@Test(timeout=10000) public void testStat() throws Exception {
  // Stat depends on platform support; skip where unavailable.
  Assume.assumeTrue(Stat.isAvailable());
  FileSystem fs=FileSystem.getLocal(new Configuration());
  Path testDir=new Path(getTestRootPath(fs),"teststat");
  fs.mkdirs(testDir);
  Path sub1=new Path(testDir,"sub1");
  Path sub2=new Path(testDir,"sub2");
  fs.mkdirs(sub1);
  fs.createSymlink(sub1,sub2,false);
  // Upper-case long suffix (4096L): lowercase 'l' is easily misread as '1'.
  FileStatus stat1=new Stat(sub1,4096L,false,fs).getFileStatus();
  FileStatus stat2=new Stat(sub2,0,false,fs).getFileStatus();
  // The directory itself is a directory; the symlink's own status is not.
  assertTrue(stat1.isDirectory());
  assertFalse(stat2.isDirectory());
  fs.delete(testDir,true);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Setting permission/owner through a symlink must affect the link target,
 * never the link itself — for links to files and links to directories.
 */
@Test(timeout=10000) public void testSetPermissionAffectsTarget() throws IOException {
  final Path file=new Path(testBaseDir1(),"file");
  final Path dir=new Path(testBaseDir2());
  final Path linkToFile=new Path(testBaseDir1(),"linkToFile");
  final Path linkToDir=new Path(testBaseDir1(),"linkToDir");
  createAndWriteFile(file);
  wrapper.createSymlink(file,linkToFile,false);
  wrapper.createSymlink(dir,linkToDir,false);
  // Change permission/owner via the file link; the link's own permission
  // must stay untouched while the target picks up the new values.
  FsPermission linkPerms=wrapper.getFileLinkStatus(linkToFile).getPermission();
  wrapper.setPermission(linkToFile,new FsPermission((short)0664));
  wrapper.setOwner(linkToFile,"user","group");
  assertEquals(linkPerms,wrapper.getFileLinkStatus(linkToFile).getPermission());
  FileStatus targetStat=wrapper.getFileStatus(file);
  assertEquals(0664,targetStat.getPermission().toShort());
  assertEquals("user",targetStat.getOwner());
  assertEquals("group",targetStat.getGroup());
  assertEquals(targetStat.getPermission(),wrapper.getFileStatus(linkToFile).getPermission());
  // Same contract for a link that points at a directory.
  linkPerms=wrapper.getFileLinkStatus(linkToDir).getPermission();
  wrapper.setPermission(linkToDir,new FsPermission((short)0664));
  wrapper.setOwner(linkToDir,"user","group");
  assertEquals(linkPerms,wrapper.getFileLinkStatus(linkToDir).getPermission());
  targetStat=wrapper.getFileStatus(dir);
  assertEquals(0664,targetStat.getPermission().toShort());
  assertEquals("user",targetStat.getOwner());
  assertEquals("group",targetStat.getGroup());
  assertEquals(targetStat.getPermission(),wrapper.getFileStatus(linkToDir).getPermission());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * A symlink whose resolved path is exactly MAX_PATH_LENGTH must be
 * accepted; one character more must be rejected with an IOException.
 */
@Test(timeout=10000) public void testCreateLinkMaxPathLink() throws IOException {
  final Path dir=new Path(testBaseDir1());
  final Path file=new Path(testBaseDir1(),"file");
  final int maxPathLen=HdfsConstants.MAX_PATH_LENGTH;
  final int dirLen=dir.toString().length() + 1;
  final int len=maxPathLen - dirLen;
  // Build a link name that makes the resolved path exactly MAX_PATH_LENGTH:
  // whole decades of "0123456789" followed by 'x' padding.
  final StringBuilder nameBuf=new StringBuilder(Math.max(len,0));
  while (nameBuf.length() + 10 <= len) {
    nameBuf.append("0123456789");
  }
  while (nameBuf.length() < len) {
    nameBuf.append('x');
  }
  Path link=new Path(nameBuf.toString());
  assertEquals(maxPathLen,dirLen + link.toString().length());
  createAndWriteFile(file);
  wrapper.setWorkingDirectory(dir);
  wrapper.createSymlink(file,link,false);
  readFile(link);
  // One extra character pushes the path over the limit.
  link=new Path(nameBuf.toString() + "x");
  try {
    wrapper.createSymlink(file,link,false);
    fail("Path name should be too long");
  }
  catch ( IOException x) {
    // expected
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * A symlink resolved via getFileStatus() must report the same owner as
 * its target file.
 */
@Test(timeout=10000) public void testLinkOwner() throws IOException {
  final Path target=new Path(testBaseDir1(),"file");
  final Path symlink=new Path(testBaseDir1(),"symlinkToFile");
  createAndWriteFile(target);
  wrapper.createSymlink(target,symlink,false);
  final String targetOwner=wrapper.getFileStatus(target).getOwner();
  final String linkOwner=wrapper.getFileStatus(symlink).getOwner();
  assertEquals(linkOwner,targetOwner);
}
APIUtilityVerifier BooleanVerifier
/**
 * recoverLease() invoked on a symlink must resolve it and succeed on the
 * (closed) target file.
 */
@Test(timeout=10000) public void testRecoverLease() throws IOException {
  final Path baseDir=new Path(testBaseDir1());
  final Path file=new Path(testBaseDir1(),"file");
  final Path link=new Path(testBaseDir1(),"link");
  wrapper.setWorkingDirectory(baseDir);
  createAndWriteFile(file);
  wrapper.createSymlink(file,link,false);
  final boolean leaseRecovered=dfs.recoverLease(link);
  assertTrue("Expected recoverLease to return true",leaseRecovered);
}
APIUtilityVerifier BooleanVerifier
/**
 * isFileClosed() invoked on a symlink must resolve it and report the
 * (closed) target file as closed.
 */
@Test(timeout=10000) public void testIsFileClosed() throws IOException {
  final Path baseDir=new Path(testBaseDir1());
  final Path file=new Path(testBaseDir1(),"file");
  final Path link=new Path(testBaseDir1(),"link");
  wrapper.setWorkingDirectory(baseDir);
  createAndWriteFile(file);
  wrapper.createSymlink(file,link,false);
  final boolean fileClosed=dfs.isFileClosed(link);
  assertTrue("Expected isFileClosed to return true",fileClosed);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * A dangling symlink (target missing) must fail getFileStatus() and reads,
 * while getFileLinkStatus() still describes the link itself; once the
 * target is created the link resolves normally.
 */
@Test(timeout=1000) public void testDanglingLink() throws IOException {
  assumeTrue(!Path.WINDOWS);
  Path fileAbs=new Path(testBaseDir1() + "/file");
  Path fileQual=new Path(testURI().toString(),fileAbs);
  Path link=new Path(testBaseDir1() + "/linkToFile");
  Path linkQual=new Path(testURI().toString(),link.toString());
  // Create the dangling link, delete it, and create it again.
  wrapper.createSymlink(fileAbs,link,false);
  FileUtil.fullyDelete(new File(link.toUri().getPath()));
  wrapper.createSymlink(fileAbs,link,false);
  // getFileStatus follows the link, so it must fail.
  try {
    wrapper.getFileStatus(link);
    fail("Got FileStatus for dangling link");
  }
  catch ( FileNotFoundException f) {
    // expected
  }
  // getFileLinkStatus reports on the link itself and must succeed.
  UserGroupInformation user=UserGroupInformation.getCurrentUser();
  FileStatus fsd=wrapper.getFileLinkStatus(link);
  assertEquals(fileQual,fsd.getSymlink());
  assertTrue(fsd.isSymlink());
  assertFalse(fsd.isDirectory());
  assertEquals(user.getUserName(),fsd.getOwner());
  assertEquals(user.getGroupNames()[0],fsd.getGroup());
  assertEquals(linkQual,fsd.getPath());
  try {
    readFile(link);
    // Fixed copy-pasted failure message: this branch is about reading,
    // not about getFileStatus.
    fail("Able to read from dangling link");
  }
  catch ( FileNotFoundException f) {
    // expected
  }
  // Once the target exists, the link resolves without error.
  createAndWriteFile(fileAbs);
  wrapper.getFileStatus(link);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test opening and reading from an InputStream through a hdfs:// URL.
 *
 * First generate a file with some content through the FileSystem API, then
 * try to open and read the file through the URL stream API.
 * @throws IOException
 */
@Test public void testDfsUrls() throws IOException {
  final Configuration conf=new HdfsConfiguration();
  final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  final FileSystem fs=cluster.getFileSystem();
  // Register the URL handler factory so hdfs:// URLs are understood.
  final FsUrlStreamHandlerFactory factory=new org.apache.hadoop.fs.FsUrlStreamHandlerFactory();
  java.net.URL.setURLStreamHandlerFactory(factory);
  final Path filePath=new Path("/thefile");
  try {
    // Write 1024 bytes whose value equals their index (mod 256).
    final byte[] expected=new byte[1024];
    for (int i=0; i < expected.length; i++) {
      expected[i]=(byte)i;
    }
    final OutputStream os=fs.create(filePath);
    os.write(expected);
    os.close();
    // Read the file back through an hdfs:// URL stream.
    final URI uri=fs.getUri();
    final URL fileURL=new URL(uri.getScheme(),uri.getHost(),uri.getPort(),filePath.toString());
    final InputStream is=fileURL.openStream();
    assertNotNull(is);
    final byte[] actual=new byte[4096];
    assertEquals(1024,is.read(actual));
    is.close();
    for (int i=0; i < expected.length; i++) {
      assertEquals(expected[i],actual[i]);
    }
    fs.delete(filePath,false);
  }
  finally {
    fs.close();
    cluster.shutdown();
  }
}
APIUtilityVerifier IterativeVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test opening and reading from an InputStream through a file:// URL.
 * @throws IOException
 * @throws URISyntaxException
 */
@Test public void testFileUrls() throws IOException, URISyntaxException {
  final Configuration conf=new HdfsConfiguration();
  // Make sure the test root exists before writing into it.
  if (!TEST_ROOT_DIR.exists() && !TEST_ROOT_DIR.mkdirs()) {
    throw new IOException("Cannot create temporary directory: " + TEST_ROOT_DIR);
  }
  final File tmpFile=new File(TEST_ROOT_DIR,"thefile");
  final URI uri=tmpFile.toURI();
  final FileSystem fs=FileSystem.get(uri,conf);
  try {
    // Write 1024 bytes whose value equals their index (mod 256).
    final byte[] expected=new byte[1024];
    for (int i=0; i < expected.length; i++) {
      expected[i]=(byte)i;
    }
    final OutputStream os=fs.create(new Path(uri.getPath()));
    os.write(expected);
    os.close();
    // Read the file back through a file:// URL stream.
    final URL fileURL=uri.toURL();
    final InputStream is=fileURL.openStream();
    assertNotNull(is);
    final byte[] actual=new byte[4096];
    assertEquals(1024,is.read(actual));
    is.close();
    for (int i=0; i < expected.length; i++) {
      assertEquals(expected[i],actual[i]);
    }
    fs.delete(new Path(uri.getPath()),false);
  }
  finally {
    fs.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * WASB byte counters: writing bumps only bytesWritten, reading bumps only
 * bytesRead, and deleting changes neither.
 */
@Test public void testStatistics() throws Exception {
  FileSystem.clearStatistics();
  final FileSystem.Statistics wasbStats=FileSystem.getStatistics("wasb",NativeAzureFileSystem.class);
  // Fresh statistics start at zero.
  assertEquals(0,wasbStats.getBytesRead());
  assertEquals(0,wasbStats.getBytesWritten());
  final Path testFile=new Path("testStats");
  // Writing 8 bytes bumps only the write counter.
  writeString(testFile,"12345678");
  assertEquals(8,wasbStats.getBytesWritten());
  assertEquals(0,wasbStats.getBytesRead());
  // Reading them back bumps only the read counter.
  final String roundTripped=readString(testFile);
  assertEquals("12345678",roundTripped);
  assertEquals(8,wasbStats.getBytesRead());
  assertEquals(8,wasbStats.getBytesWritten());
  // Deleting the file leaves both byte counters untouched.
  assertTrue(fs.delete(testFile,true));
  assertEquals(8,wasbStats.getBytesRead());
  assertEquals(8,wasbStats.getBytesWritten());
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Read a blob repeatedly while another task writes to it out-of-band, and
 * verify that a full read still sees the expected number of bytes.
 */
@Test public void testReadOOBWrites() throws Exception {
  byte[] dataBlockWrite=new byte[UPLOAD_BLOCK_SIZE];
  byte[] dataBlockRead=new byte[UPLOAD_BLOCK_SIZE];
  // Seed the blob with NUMBER_OF_BLOCKS blocks of 0xFF bytes.
  DataOutputStream outputStream=testAccount.getStore().storefile("WASB_String.txt",new PermissionStatus("","",FsPermission.getDefault()));
  Arrays.fill(dataBlockWrite,(byte)255);
  for (int i=0; i < NUMBER_OF_BLOCKS; i++) {
    outputStream.write(dataBlockWrite);
  }
  outputStream.flush();
  outputStream.close();
  // Start an out-of-band writer that updates the same blob while we read.
  DataBlockWriter writeBlockTask=new DataBlockWriter(testAccount,"WASB_String.txt");
  writeBlockTask.startWriting();
  int count=0;
  DataInputStream inputStream=null;
  for (int i=0; i < 5; i++) {
    try {
      inputStream=testAccount.getStore().retrieve("WASB_String.txt",0);
      count=0;
      int c=0;
      while (c >= 0) {
        c=inputStream.read(dataBlockRead,0,UPLOAD_BLOCK_SIZE);
        if (c < 0) {
          break;
        }
        count+=c;
      }
    }
    catch ( IOException e) {
      // Report the exception itself rather than e.getCause(): the cause may
      // be null, and dereferencing it here masked the real failure with an NPE.
      e.printStackTrace();
      fail("Unexpected IOException while reading blob: " + e);
    }
    if (null != inputStream) {
      inputStream.close();
    }
  }
  writeBlockTask.stopWriting();
  // The final complete read must have seen every block.
  assertEquals(NUMBER_OF_BLOCKS * UPLOAD_BLOCK_SIZE,count);
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests that WASB understands the old-style ASV metadata and changes it when
 * it gets the chance.
 */
@Test public void testOldPermissionMetadata() throws Exception {
  Path selfishFile=new Path("/noOneElse");
  // Seed the backing store with a blob carrying only the legacy
  // "asv_permission" key. Parameterized map instead of a raw HashMap
  // (a raw map's get() returns Object and cannot feed the String below).
  HashMap<String, String> metadata=new HashMap<String, String>();
  metadata.put("asv_permission",getExpectedPermissionString("rw-------"));
  backingStore.setContent(AzureBlobStorageTestAccount.toMockUri(selfishFile),new byte[]{},metadata);
  // The old-style permission must still be honored when reading status.
  FsPermission justMe=new FsPermission(FsAction.READ_WRITE,FsAction.NONE,FsAction.NONE);
  FileStatus retrievedStatus=fs.getFileStatus(selfishFile);
  assertNotNull(retrievedStatus);
  assertEquals(justMe,retrievedStatus.getPermission());
  assertEquals(getExpectedOwner(),retrievedStatus.getOwner());
  assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,retrievedStatus.getGroup());
  // A permission update should rewrite the metadata under the new
  // "hdi_permission" key and drop the old one.
  FsPermission meAndYou=new FsPermission(FsAction.READ_WRITE,FsAction.READ_WRITE,FsAction.NONE);
  fs.setPermission(selfishFile,meAndYou);
  metadata=backingStore.getMetadata(AzureBlobStorageTestAccount.toMockUri(selfishFile));
  assertNotNull(metadata);
  String storedPermission=metadata.get("hdi_permission");
  assertEquals(getExpectedPermissionString("rw-rw----"),storedPermission);
  assertNull(metadata.get("asv_permission"));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * A folder created via mkdirs() must be stamped in blob metadata with the
 * folder marker and its permissions.
 */
@Test public void testFolderMetadata() throws Exception {
  Path folder=new Path("/folder");
  FsPermission justRead=new FsPermission(FsAction.READ,FsAction.READ,FsAction.READ);
  fs.mkdirs(folder,justRead);
  // Parameterized map instead of a raw HashMap so get() yields String values.
  HashMap<String, String> metadata=backingStore.getMetadata(AzureBlobStorageTestAccount.toMockUri(folder));
  assertNotNull(metadata);
  assertEquals("true",metadata.get("hdi_isfolder"));
  assertEquals(getExpectedPermissionString("r--r--r--"),metadata.get("hdi_permission"));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Permissions passed at create() time must be persisted in blob metadata
 * under "hdi_permission" and reflected by getFileStatus().
 */
@SuppressWarnings("deprecation") @Test public void testPermissionMetadata() throws Exception {
  FsPermission justMe=new FsPermission(FsAction.READ_WRITE,FsAction.NONE,FsAction.NONE);
  Path selfishFile=new Path("/noOneElse");
  fs.create(selfishFile,justMe,true,4096,fs.getDefaultReplication(),fs.getDefaultBlockSize(),null).close();
  // Parameterized map instead of a raw HashMap (a raw map's get() returns
  // Object and cannot be assigned to the String below).
  HashMap<String, String> metadata=backingStore.getMetadata(AzureBlobStorageTestAccount.toMockUri(selfishFile));
  assertNotNull(metadata);
  String storedPermission=metadata.get("hdi_permission");
  assertEquals(getExpectedPermissionString("rw-------"),storedPermission);
  FileStatus retrievedStatus=fs.getFileStatus(selfishFile);
  assertNotNull(retrievedStatus);
  assertEquals(justMe,retrievedStatus.getPermission());
  assertEquals(getExpectedOwner(),retrievedStatus.getOwner());
  assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,retrievedStatus.getGroup());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests that WASB works well with an older version container with ASV-era
 * version and metadata.
 */
@Test public void testFirstContainerVersionMetadata() throws Exception {
  // Simulate a container stamped with the ASV-era version key.
  // Parameterized map instead of a raw HashMap.
  HashMap<String, String> containerMetadata=new HashMap<String, String>();
  containerMetadata.put(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY,AzureNativeFileSystemStore.FIRST_WASB_VERSION);
  FsWithPreExistingContainer fsWithContainer=FsWithPreExistingContainer.create(containerMetadata);
  // Read-only operations must leave the old version stamp untouched.
  assertFalse(fsWithContainer.getFs().exists(new Path("/IDontExist")));
  assertEquals(0,fsWithContainer.getFs().listStatus(new Path("/")).length);
  assertEquals(AzureNativeFileSystemStore.FIRST_WASB_VERSION,fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY));
  assertNull(fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
  // The first write operation migrates the stamp to the current key.
  fsWithContainer.getFs().mkdirs(new Path("/dir"));
  assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION,fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
  assertNull(fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY));
  fsWithContainer.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests that WASB stamped the version in the container metadata if it does a
 * write operation to a pre-existing container.
 */
@Test public void testPreExistingContainerVersionMetadata() throws Exception {
  final FsWithPreExistingContainer fixture=FsWithPreExistingContainer.create();
  // Read-only operations must leave the container metadata untouched.
  assertFalse(fixture.getFs().exists(new Path("/IDontExist")));
  assertEquals(0,fixture.getFs().listStatus(new Path("/")).length);
  assertNull(fixture.getContainerMetadata());
  // The first write operation stamps the current WASB version.
  fixture.getFs().mkdirs(new Path("/dir"));
  assertNotNull(fixture.getContainerMetadata());
  assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION,fixture.getContainerMetadata().get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
  fixture.close();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * The configured Azure block size must be reported back by getFileStatus().
 */
@Test public void testNumberOfBlocks() throws Exception {
  final Configuration conf=new Configuration();
  conf.set(NativeAzureFileSystem.AZURE_BLOCK_SIZE_PROPERTY_NAME,"500");
  final AzureBlobStorageTestAccount mockAccount=AzureBlobStorageTestAccount.createMock(conf);
  final FileSystem mockFs=mockAccount.getFileSystem();
  final Path testFile=createTestFile(mockFs,1200);
  final FileStatus status=mockFs.getFileStatus(testFile);
  assertEquals(500,status.getBlockSize());
  mockAccount.cleanup();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * While an upload is in progress the placeholder blob must carry a link
 * back to the temporary upload blob; closing the stream removes the link.
 */
@Test public void testLinkBlobs() throws Exception {
  Path filePath=new Path("/inProgress");
  FSDataOutputStream outputStream=fs.create(filePath);
  // Parameterized map instead of a raw HashMap (a raw map's get() returns
  // Object and cannot be assigned to the String below).
  HashMap<String, String> metadata=backingStore.getMetadata(AzureBlobStorageTestAccount.toMockUri(filePath));
  assertNotNull(metadata);
  String linkValue=metadata.get(AzureNativeFileSystemStore.LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY);
  assertNotNull(linkValue);
  // The link must point at an existing upload blob, and the file itself
  // must already be visible.
  assertTrue(backingStore.exists(AzureBlobStorageTestAccount.toMockUri(linkValue)));
  assertTrue(fs.exists(filePath));
  // Closing the stream finalizes the upload and removes the link metadata.
  outputStream.close();
  metadata=backingStore.getMetadata(AzureBlobStorageTestAccount.toMockUri(filePath));
  assertNull(metadata.get(AzureNativeFileSystemStore.LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * With no key provider class configured, the plain account-key property
 * must be consulted directly.
 */
@Test public void testDefaultKeyProvider() throws Exception {
  final Configuration conf=new Configuration();
  final String account="testacct";
  final String key="testkey";
  conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account,key);
  final String retrieved=AzureNativeFileSystemStore.getAccountKeyFromConfiguration(account,conf);
  assertEquals(key,retrieved);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * With SimpleKeyProvider explicitly configured for the account, the key
 * lookup must still resolve to the configured account key.
 */
@Test public void testValidKeyProvider() throws Exception {
  final Configuration conf=new Configuration();
  final String account="testacct";
  final String key="testkey";
  conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account,key);
  conf.setClass("fs.azure.account.keyprovider." + account,SimpleKeyProvider.class,KeyProvider.class);
  final String retrieved=AzureNativeFileSystemStore.getAccountKeyFromConfiguration(account,conf);
  assertEquals(key,retrieved);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests the cases when the URI is specified with no authority, i.e.
 * wasb:///path/to/file: the authority must be inherited from the default
 * FS for every wasb/wasbs scheme combination, and rejected when the
 * default FS is not a wasb filesystem.
 */
@Test public void testNoUriAuthority() throws Exception {
  final String[] wasbAliases={"wasb","wasbs"};
  for ( final String defaultScheme : wasbAliases) {
    for ( final String wantedScheme : wasbAliases) {
      testAccount=AzureBlobStorageTestAccount.createMock();
      final Configuration conf=testAccount.getFileSystem().getConf();
      final String authority=testAccount.getFileSystem().getUri().getAuthority();
      // Point the default FS at the mock account under defaultScheme.
      final URI defaultUri=new URI(defaultScheme,authority,null,null,null);
      conf.set("fs.default.name",defaultUri.toString());
      // An authority-less URI should pick up the default FS's authority.
      final URI wantedUri=new URI(wantedScheme + ":///random/path");
      final NativeAzureFileSystem obtained=(NativeAzureFileSystem)FileSystem.get(wantedUri,conf);
      assertNotNull(obtained);
      assertEquals(new URI(wantedScheme,authority,null,null,null),obtained.getUri());
      final Path qualified=obtained.makeQualified(new Path(wantedUri));
      assertEquals(new URI(wantedScheme,authority,wantedUri.getPath(),null,null),qualified.toUri());
      testAccount.cleanup();
      FileSystem.closeAll();
    }
  }
  // With a non-wasb default FS, an authority-less wasb URI must be rejected.
  testAccount=AzureBlobStorageTestAccount.createMock();
  final Configuration conf=testAccount.getFileSystem().getConf();
  conf.set("fs.default.name","file:///");
  try {
    FileSystem.get(new URI("wasb:///random/path"),conf);
    fail("Should've thrown.");
  }
  catch ( IllegalArgumentException expected) {
    // expected
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testMetricsOnFileCreateRead() throws Exception {
long base=getBaseWebResponses();
assertEquals(0,AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
Path filePath=new Path("/metricsTest_webResponses");
final int FILE_SIZE=1000;
getBandwidthGaugeUpdater().suppressAutoUpdate();
Date start=new Date();
OutputStream outputStream=fs.create(filePath);
outputStream.write(nonZeroByteArray(FILE_SIZE));
outputStream.close();
long uploadDurationMs=new Date().getTime() - start.getTime();
logOpResponseCount("Creating a 1K file",base);
base=assertWebResponsesInRange(base,2,15);
getBandwidthGaugeUpdater().triggerUpdate(true);
long bytesWritten=AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation());
assertTrue("The bytes written in the last second " + bytesWritten + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",bytesWritten > (FILE_SIZE / 2) && bytesWritten < (FILE_SIZE * 2));
long totalBytesWritten=AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
assertTrue("The total bytes written " + totalBytesWritten + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
long uploadRate=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_UPLOAD_RATE);
System.out.println("Upload rate: " + uploadRate + " bytes/second.");
long expectedRate=(FILE_SIZE * 1000L) / uploadDurationMs;
assertTrue("The upload rate " + uploadRate + " is below the expected range of around "+ expectedRate+ " bytes/second that the unit test observed. This should never be"+ " the case since the test underestimates the rate by looking at "+ " end-to-end time instead of just block upload time.",uploadRate >= expectedRate);
long uploadLatency=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_UPLOAD_LATENCY);
System.out.println("Upload latency: " + uploadLatency);
long expectedLatency=uploadDurationMs;
assertTrue("The upload latency " + uploadLatency + " should be greater than zero now that I've just uploaded a file.",uploadLatency > 0);
assertTrue("The upload latency " + uploadLatency + " is more than the expected range of around "+ expectedLatency+ " milliseconds that the unit test observed. This should never be"+ " the case since the test overestimates the latency by looking at "+ " end-to-end time instead of just block upload time.",uploadLatency <= expectedLatency);
start=new Date();
InputStream inputStream=fs.open(filePath);
int count=0;
while (inputStream.read() >= 0) {
count++;
}
inputStream.close();
long downloadDurationMs=new Date().getTime() - start.getTime();
assertEquals(FILE_SIZE,count);
logOpResponseCount("Reading a 1K file",base);
base=assertWebResponsesInRange(base,1,10);
getBandwidthGaugeUpdater().triggerUpdate(false);
long totalBytesRead=AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
assertEquals(FILE_SIZE,totalBytesRead);
long bytesRead=AzureMetricsTestUtil.getCurrentBytesRead(getInstrumentation());
assertTrue("The bytes read in the last second " + bytesRead + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",bytesRead > (FILE_SIZE / 2) && bytesRead < (FILE_SIZE * 2));
long downloadRate=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_DOWNLOAD_RATE);
System.out.println("Download rate: " + downloadRate + " bytes/second.");
expectedRate=(FILE_SIZE * 1000L) / downloadDurationMs;
assertTrue("The download rate " + downloadRate + " is below the expected range of around "+ expectedRate+ " bytes/second that the unit test observed. This should never be"+ " the case since the test underestimates the rate by looking at "+ " end-to-end time instead of just block download time.",downloadRate >= expectedRate);
long downloadLatency=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_DOWNLOAD_LATENCY);
System.out.println("Download latency: " + downloadLatency);
expectedLatency=downloadDurationMs;
assertTrue("The download latency " + downloadLatency + " should be greater than zero now that I've just downloaded a file.",downloadLatency > 0);
assertTrue("The download latency " + downloadLatency + " is more than the expected range of around "+ expectedLatency+ " milliseconds that the unit test observed. This should never be"+ " the case since the test overestimates the latency by looking at "+ " end-to-end time instead of just block download time.",downloadLatency <= expectedLatency);
assertNoErrors();
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Single-threaded exercise of the bandwidth gauge updater: an empty update,
 * an instantaneous upload, and an upload spread over ~10 seconds whose
 * reported bytes/second should be averaged down.
 * @throws Exception on any failure
 */
@Test public void testSingleThreaded() throws Exception {
  AzureFileSystemInstrumentation instrumentation=new AzureFileSystemInstrumentation(new Configuration());
  // manual-trigger mode (third argument true): the test drives every update
  BandwidthGaugeUpdater updater=new BandwidthGaugeUpdater(instrumentation,1000,true);
  try {
    // nothing uploaded yet: gauge must read zero
    updater.triggerUpdate(true);
    assertEquals(0,AzureMetricsTestUtil.getCurrentBytesWritten(instrumentation));
    // an instantaneous 150-byte upload is reported in full
    updater.blockUploaded(new Date(),new Date(),150);
    updater.triggerUpdate(true);
    assertEquals(150,AzureMetricsTestUtil.getCurrentBytesWritten(instrumentation));
    // 200 bytes over ~10 seconds should average to roughly 20 bytes/second
    updater.blockUploaded(new Date(new Date().getTime() - 10000),new Date(),200);
    updater.triggerUpdate(true);
    long currentBytes=AzureMetricsTestUtil.getCurrentBytesWritten(instrumentation);
    assertTrue("We expect around (200/10 = 20) bytes written as the gauge value." + "Got " + currentBytes,currentBytes > 18 && currentBytes < 22);
  } finally {
    // close even when an assertion fails, so the updater is always released
    // (the original leaked it on any assertion failure)
    updater.close();
  }
}
APIUtilityVerifier UtilityVerifier
/**
 * A non-recursive delete of a non-empty directory must raise an
 * IOException and leave the directory in place.
 * @throws Throwable on any failure
 */
@Test public void testDeleteNonEmptyDirNonRecursive() throws Throwable {
  final Path dir=path("testDeleteNonEmptyDirNonRecursive");
  mkdirs(dir);
  // make the directory non-empty
  final Path child=new Path(dir,"childfile");
  ContractTestUtils.writeTextFile(getFileSystem(),child,"goodbye, world",true);
  try {
    // guard against this test being accidentally pointed at the root dir
    ContractTestUtils.rejectRootOperation(dir);
    boolean outcome=getFileSystem().delete(dir,false);
    fail("non recursive delete should have raised an exception," + " but completed with exit code " + outcome);
  } catch (IOException expected) {
    // the required failure mode
    handleExpectedException(expected);
  }
  // the directory must have survived the failed delete
  ContractTestUtils.assertIsDirectory(getFileSystem(),dir);
}
APIUtilityVerifier EqualityVerifier
/**
 * Opening a freshly created zero-byte file must position the stream at 0
 * and report EOF on the first read.
 * @throws Throwable on any failure
 */
@Test public void testOpenReadZeroByteFile() throws Throwable {
  describe("create & read a 0 byte file");
  final Path empty=path("zero.txt");
  touch(getFileSystem(),empty);
  instream=getFileSystem().open(empty);
  assertEquals(0,instream.getPos());
  // an empty file reports EOF immediately
  assertMinusOne("initial byte read",instream.read());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify that two input streams opened on the same file are independent:
 * each keeps its own position, and closing one does not disturb the other.
 * @throws Throwable on any failure
 */
@Test public void testOpenFileTwice() throws Throwable {
describe("verify that two opened file streams are independent");
Path path=path("testopenfiletwice.txt");
// dataset rule: the byte at offset i has value i (mod 256), so every
// read's expected value is simply its position
byte[] block=dataset(TEST_FILE_LEN,0,255);
createFile(getFileSystem(),path,false,block);
FSDataInputStream instream1=getFileSystem().open(path);
int c=instream1.read();
assertEquals(0,c);
FSDataInputStream instream2=null;
try {
instream2=getFileSystem().open(path);
// stream 2 starts at offset 0 even though stream 1 has already advanced
assertEquals("first read of instream 2",0,instream2.read());
assertEquals("second read of instream 1",1,instream1.read());
instream1.close();
// stream 2 must still be readable after stream 1 is closed
assertEquals("second read of instream 2",1,instream2.read());
// deliberate second close of stream 1: close() must be idempotent
instream1.close();
}
finally {
// quiet cleanup of both streams regardless of outcome
IOUtils.closeStream(instream1);
IOUtils.closeStream(instream2);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Sequential single-byte reads must return each dataset byte in order,
 * then -1 on every read past the end.
 * @throws Throwable on any failure
 */
@Test public void testSequentialRead() throws Throwable {
  describe("verify that sequential read() operations return values");
  Path path=path("testsequentialread.txt");
  final int len=4;
  final int base=0x40;
  byte[] block=dataset(len,base,base + len);
  createFile(getFileSystem(),path,false,block);
  instream=getFileSystem().open(path);
  // each byte in order: dataset value at offset i is base + i
  for (int i=0; i < len; i++) {
    assertEquals(base + i,instream.read());
  }
  // EOF is sticky: two consecutive reads both report -1
  assertEquals(-1,instream.read());
  assertEquals(-1,instream.read());
  instream.close();
}
APIUtilityVerifier BooleanVerifier
/**
 * Renaming a directory into an existing directory must place the source
 * underneath the destination and leave the destination's own files intact.
 * @throws Throwable on any failure
 */
@Test public void testRenameDirIntoExistingDir() throws Throwable {
  describe("Verify renaming a dir into an existing dir puts it underneath" + " and leaves existing files alone");
  final FileSystem fs=getFileSystem();
  // source directory containing one file
  String sourceSubdir="source";
  Path srcDir=path(sourceSubdir);
  Path srcFilePath=new Path(srcDir,"source-256.txt");
  byte[] srcBytes=dataset(256,'a','z');
  writeDataset(fs,srcFilePath,srcBytes,srcBytes.length,1024,false);
  // destination directory with its own, unrelated file
  Path destDir=path("dest");
  Path destFilePath=new Path(destDir,"dest-512.txt");
  byte[] destBytes=dataset(512,'A','Z');
  writeDataset(fs,destFilePath,destBytes,destBytes.length,1024,false);
  assertIsFile(destFilePath);
  // move the whole source directory under the destination directory
  boolean rename=rename(srcDir,destDir);
  Path renamedSrc=new Path(destDir,sourceSubdir);
  // the pre-existing destination file must be untouched
  assertIsFile(destFilePath);
  assertIsDirectory(renamedSrc);
  ContractTestUtils.verifyFileContents(fs,destFilePath,destBytes);
  assertTrue("rename returned false though the contents were copied",rename);
}
APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Rename test - handles filesystems that will overwrite the destination
 * as well as those that do not (i.e. HDFS). Which outcome is required is
 * driven by the contract options RENAME_OVERWRITES_DEST and
 * RENAME_RETURNS_FALSE_IF_DEST_EXISTS; the destination's final content
 * is verified against whichever outcome actually occurred.
 * @throws Throwable
 */
@Test public void testRenameFileOverExistingFile() throws Throwable {
describe("Verify renaming a file onto an existing file matches expectations");
Path srcFile=path("source-256.txt");
byte[] srcData=dataset(256,'a','z');
writeDataset(getFileSystem(),srcFile,srcData,srcData.length,1024,false);
Path destFile=path("dest-512.txt");
byte[] destData=dataset(512,'A','Z');
writeDataset(getFileSystem(),destFile,destData,destData.length,1024,false);
assertIsFile(destFile);
// contract options decide which rename outcomes are acceptable
boolean renameOverwritesDest=isSupported(RENAME_OVERWRITES_DEST);
boolean renameReturnsFalseOnRenameDestExists=!isSupported(RENAME_RETURNS_FALSE_IF_DEST_EXISTS);
// tracks whether destFile is expected to still hold its original bytes
boolean destUnchanged=true;
try {
boolean renamed=rename(srcFile,destFile);
if (renameOverwritesDest) {
// POSIX-style filesystem: the rename overwrites and reports success
assertTrue("Rename returned false",renamed);
destUnchanged=false;
}
else {
// HDFS-style: the rename must not succeed; a plain false return is
// only acceptable when the contract declares that behavior
if (renamed && !renameReturnsFalseOnRenameDestExists) {
String destDirLS=generateAndLogErrorListing(srcFile,destFile);
getLog().error("dest dir {}",destDirLS);
fail("expected rename(" + srcFile + ", "+ destFile+ " ) to fail,"+ " but got success and destination of "+ destDirLS);
}
}
}
catch ( FileAlreadyExistsException e) {
// raising an exception instead of returning false is also valid
handleExpectedException(e);
}
// destination holds src data iff the rename overwrote it
ContractTestUtils.verifyFileContents(getFileSystem(),destFile,destUnchanged ? destData : srcData);
}
APIUtilityVerifier BooleanVerifier
/**
 * Rename a file to a new name in the same directory and verify the
 * result flag, the parent listing, and the file's content.
 * @throws Throwable on any failure
 */
@Test public void testRenameNewFileSameDir() throws Throwable {
  describe("rename a file into a new file in the same directory");
  Path renameSrc=path("rename_src");
  Path renameTarget=path("rename_dest");
  byte[] data=dataset(256,'a','z');
  writeDataset(getFileSystem(),renameSrc,data,data.length,1024 * 1024,false);
  // perform the rename and check every observable consequence
  boolean renamed=rename(renameSrc,renameTarget);
  assertTrue("rename(" + renameSrc + ", " + renameTarget + ") returned false",renamed);
  ContractTestUtils.assertListStatusFinds(getFileSystem(),renameTarget.getParent(),renameTarget);
  ContractTestUtils.verifyFileContents(getFileSystem(),renameTarget,data);
}
APIUtilityVerifier BranchVerifier BooleanVerifier
/**
 * Rename a file into a directory that does not exist yet. The acceptable
 * outcome depends on the RENAME_CREATES_DEST_DIRS contract option.
 * @throws Throwable
 */
@Test public void testRenameFileNonexistentDir() throws Throwable {
// NOTE(review): describe() text looks copy-pasted from another rename test;
// the string is runtime-visible so it is left unchanged here
describe("rename a file into a new file in the same directory");
Path renameSrc=path("testRenameSrc");
Path renameTarget=path("subdir/testRenameTarget");
byte[] data=dataset(256,'a','z');
writeDataset(getFileSystem(),renameSrc,data,data.length,1024 * 1024,false);
boolean renameCreatesDestDirs=isSupported(RENAME_CREATES_DEST_DIRS);
try {
boolean rename=rename(renameSrc,renameTarget);
if (renameCreatesDestDirs) {
// filesystems that auto-create parent dirs: data arrives at target
assertTrue(rename);
ContractTestUtils.verifyFileContents(getFileSystem(),renameTarget,data);
}
else {
// otherwise the rename fails and the source must be left intact
assertFalse(rename);
ContractTestUtils.verifyFileContents(getFileSystem(),renameSrc,data);
}
}
catch ( FileNotFoundException e) {
// an exception is only acceptable when dest dirs are NOT auto-created
assertFalse(renameCreatesDestDirs);
}
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Rename a file that does not exist. Depending on the
 * RENAME_RETURNS_FALSE_IF_SOURCE_MISSING contract option the filesystem
 * must either return false or raise FileNotFoundException; in every case
 * no destination file may be created.
 * @throws Throwable
 */
@Test public void testRenameNonexistentFile() throws Throwable {
// NOTE(review): describe() text looks copy-pasted from another rename test;
// the string is runtime-visible so it is left unchanged here
describe("rename a file into a new file in the same directory");
Path missing=path("testRenameNonexistentFileSrc");
Path target=path("testRenameNonexistentFileDest");
boolean renameReturnsFalseOnFailure=isSupported(ContractOptions.RENAME_RETURNS_FALSE_IF_SOURCE_MISSING);
// ensure the parent exists so only the source file itself is missing
mkdirs(missing.getParent());
try {
boolean renamed=rename(missing,target);
if (!renameReturnsFalseOnFailure) {
// this filesystem is declared to raise an exception, not return a flag
String destDirLS=generateAndLogErrorListing(missing,target);
fail("expected rename(" + missing + ", "+ target+ " ) to fail,"+ " got a result of "+ renamed+ " and a destination directory of "+ destDirLS);
}
else {
// false-on-failure filesystems: returning at all is fine, but the
// result must be false
getLog().warn("Rename returned {} renaming a nonexistent file",renamed);
assertFalse("Renaming a missing file returned true",renamed);
}
}
catch ( FileNotFoundException e) {
if (renameReturnsFalseOnFailure) {
// a false-on-failure filesystem must not throw
ContractTestUtils.fail("Renaming a missing file unexpectedly threw an exception",e);
}
handleExpectedException(e);
}
catch ( IOException e) {
// relaxed contract: a broader IOException is tolerated with a warning
handleRelaxedException("rename nonexistent file","FileNotFoundException",e);
}
// whatever the failure mode, nothing may appear at the destination
assertPathDoesNotExist("rename nonexistent file created a destination file",target);
}
APIUtilityVerifier UtilityVerifier
/**
 * A non-recursive delete of a non-empty root directory must raise an
 * IOException and leave the root directory intact.
 * @throws Throwable on any failure
 */
@Test public void testRmNonEmptyRootDirNonRecursive() throws Throwable {
  skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
  final Path root=new Path("/");
  final String touchfile="/testRmNonEmptyRootDirNonRecursive";
  final Path marker=new Path(touchfile);
  // make root non-empty
  ContractTestUtils.touch(getFileSystem(),marker);
  ContractTestUtils.assertIsDirectory(getFileSystem(),root);
  try {
    boolean deleted=getFileSystem().delete(root,false);
    fail("non recursive delete should have raised an exception," + " but completed with exit code " + deleted);
  } catch (IOException e) {
    // the required failure mode
    handleExpectedException(e);
  } finally {
    // remove the marker so later root-directory tests start clean
    getFileSystem().delete(marker,false);
  }
  ContractTestUtils.assertIsDirectory(getFileSystem(),root);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Seek round a file bigger than IO buffers
 * @throws Throwable
 */
@Test public void testSeekBigFile() throws Throwable {
  describe("Seek round a large file and verify the bytes are what is expected");
  Path testSeekFile=path("bigseekfile.txt");
  // dataset rule: byte at offset i has value i (mod 256), so the byte at
  // any seek target is predictable
  byte[] data=dataset(65536,0,255);
  createFile(getFileSystem(),testSeekFile,false,data);
  instream=getFileSystem().open(testSeekFile);
  assertEquals(0,instream.getPos());
  // sequential reads from the front
  instream.seek(0);
  assertEquals(0,instream.read());
  assertEquals(1,instream.read());
  assertEquals(2,instream.read());
  // forward seeks well past any plausible buffer size
  instream.seek(32768);
  assertEquals("@32768",data[32768],(byte)instream.read());
  instream.seek(40000);
  assertEquals("@40000",data[40000],(byte)instream.read());
  // backward seek
  instream.seek(8191);
  assertEquals("@8191",data[8191],(byte)instream.read());
  // and back to the very beginning
  instream.seek(0);
  assertEquals("@0",0,(byte)instream.read());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Seek to two bytes before EOF: the next two reads must succeed and the
 * third must report end-of-file.
 * @throws Throwable on any failure
 */
@Test public void testSeekAndReadPastEndOfFile() throws Throwable {
  describe("verify that reading past the last bytes in the file returns -1");
  instream=getFileSystem().open(smallSeekFile);
  assertEquals(0,instream.getPos());
  // position on the penultimate byte; the two remaining reads must succeed
  instream.seek(TEST_FILE_LEN - 2);
  for (int i=0; i < 2; i++) {
    assertTrue("Premature EOF",instream.read() != -1);
  }
  // now at EOF: the next read reports end of file
  assertMinusOne("read past end of file",instream.read());
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier
/**
 * Seek and read on a closed file.
 * Some filesystems let callers seek on a closed file -these must
 * still fail on the subsequent reads. getPos() may either return or raise
 * an IOException; close() must be idempotent.
 * @throws Throwable
 */
@Test public void testSeekReadClosedFile() throws Throwable {
  boolean supportsSeekOnClosedFiles=isSupported(SUPPORTS_SEEK_ON_CLOSED_FILE);
  instream=getFileSystem().open(smallSeekFile);
  getLog().debug("Stream is of type " + instream.getClass().getCanonicalName());
  instream.close();
  // seek() may or may not fail, depending on the contract option
  try {
    instream.seek(0);
    if (!supportsSeekOnClosedFiles) {
      fail("seek succeeded on a closed stream");
    }
  }
  catch ( IOException e) {
    // expected on filesystems that reject seeks on closed streams
  }
  try {
    int data=instream.available();
    // fixed message: this probe calls available(), not read()
    fail("available() succeeded on a closed stream, got " + data);
  }
  catch ( IOException e) {
    // expected
  }
  try {
    int data=instream.read();
    fail("read() succeeded on a closed stream, got " + data);
  }
  catch ( IOException e) {
    // expected
  }
  try {
    byte[] buffer=new byte[1];
    int result=instream.read(buffer,0,1);
    fail("read(buffer, 0, 1) succeeded on a closed stream, got " + result);
  }
  catch ( IOException e) {
    // expected
  }
  // getPos() is allowed to either return a value or raise IOException;
  // this just verifies it fails in no other way (e.g. NPE)
  try {
    long offset=instream.getPos();
  }
  catch ( IOException e) {
    // raising an error here is valid
  }
  // close() must be idempotent: closing an already-closed stream is a no-op
  instream.close();
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify that a positioned bulk read does not move the stream's implicit
 * position as reported by getPos(), and that the bytes it returns match
 * the dataset at the requested offset.
 * @throws Throwable
 */
@Test public void testPositionedBulkReadDoesntChangePosition() throws Throwable {
describe("verify that a positioned read does not change the getPos() value");
Path testSeekFile=path("bigseekfile.txt");
// dataset rule: byte at offset i has value i (mod 256)
byte[] block=dataset(65536,0,255);
createFile(getFileSystem(),testSeekFile,false,block);
instream=getFileSystem().open(testSeekFile);
// advance the implicit position to 40000 via seek + read
instream.seek(39999);
assertTrue(-1 != instream.read());
assertEquals(40000,instream.getPos());
byte[] readBuffer=new byte[256];
// positioned read at offset 128 must leave getPos() at 40000
// NOTE(review): the bytes-read return value is ignored here; a short
// read would surface in the content comparison loop below
instream.read(128,readBuffer,0,readBuffer.length);
assertEquals(40000,instream.getPos());
// the implicit position still yields the byte at offset 40000
assertEquals("@40000",block[40000],(byte)instream.read());
// every byte of the positioned read matches the dataset at offset+128
for (int i=0; i < 256; i++) {
assertEquals("@" + i,block[i + 128],readBuffer[i]);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Seek past the EOF: depending on the REJECTS_SEEK_PAST_EOF contract
 * option the seek either succeeds (and the following read returns -1) or
 * raises EOFException. Either way the stream must recover and serve a
 * subsequent in-range seek and read.
 * @throws Throwable
 */
@Test public void testSeekPastEndOfFileThenReseekAndRead() throws Throwable {
describe("do a seek past the EOF, then verify the stream recovers");
instream=getFileSystem().open(smallSeekFile);
boolean canSeekPastEOF=!getContract().isSupported(ContractOptions.REJECTS_SEEK_PAST_EOF,true);
try {
instream.seek(TEST_FILE_LEN + 1);
// the seek was allowed: the read must report EOF
assertMinusOne("read after seeking past EOF",instream.read());
}
catch ( EOFException e) {
// only acceptable when the contract rejects seeks past EOF
if (canSeekPastEOF) {
throw e;
}
handleExpectedException(e);
}
catch ( IOException e) {
// relaxed contract: a broader IOException is tolerated with a warning
if (canSeekPastEOF) {
throw e;
}
handleRelaxedException("a seek past the end of the file","EOFException",e);
}
// regardless of the outcome above, the stream must still be usable
instream.seek(1);
assertTrue("Premature EOF",instream.read() != -1);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Basic seek/read interplay: reads advance getPos(), forward and backward
 * seeks reposition it, and each read returns the dataset byte at its offset.
 * @throws Throwable on any failure
 */
@Test public void testSeekFile() throws Throwable {
  describe("basic seek operations");
  instream=getFileSystem().open(smallSeekFile);
  assertEquals(0,instream.getPos());
  // the dataset rule is offset => value, so each read returns its position
  instream.seek(0);
  assertEquals(0,instream.read());
  assertEquals(1,instream.read());
  assertEquals(2,instream.getPos());
  assertEquals(2,instream.read());
  assertEquals(3,instream.getPos());
  // forward seek
  instream.seek(128);
  assertEquals(128,instream.getPos());
  assertEquals(128,instream.read());
  // backward seek
  instream.seek(63);
  assertEquals(63,instream.read());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * A seek to a negative offset must fail — ideally with EOFException,
 * tolerably with another IOException — and must leave the position at 0.
 * @throws Throwable on any failure
 */
@Test public void testNegativeSeek() throws Throwable {
  instream=getFileSystem().open(smallSeekFile);
  assertEquals(0,instream.getPos());
  try {
    instream.seek(-1);
    // some streams only fail on the operations that follow, so probe them
    long pos=instream.getPos();
    LOG.warn("Seek to -1 returned a position of " + pos);
    int result=instream.read();
    fail("expected an exception, got data " + result + " at a position of " + pos);
  } catch (EOFException e) {
    // the preferred failure mode
    handleExpectedException(e);
  } catch (IOException e) {
    // relaxed contract: any IOException is tolerated with a warning
    handleRelaxedException("a negative seek","EOFException",e);
  }
  // the failed seek must not have moved the position
  assertEquals(0,instream.getPos());
}
APIUtilityVerifier EqualityVerifier
/**
 * On a zero-byte file a seek to 0 is legal, but every style of read —
 * before or after the seek, byte or buffer — must report EOF.
 * @throws Throwable on any failure
 */
@Test public void testSeekZeroByteFile() throws Throwable {
  describe("seek and read a 0 byte file");
  instream=getFileSystem().open(zeroByteFile);
  assertEquals(0,instream.getPos());
  // a plain read is immediately at EOF
  assertMinusOne("initial byte read",instream.read());
  // seeking to offset 0 is legal even on an empty file
  instream.seek(0);
  // ...but reads still report EOF, byte-wise and buffer-wise
  assertMinusOne("post-seek byte read",instream.read());
  byte[] buffer=new byte[1];
  assertMinusOne("post-seek buffer read",instream.read(buffer,0,1));
}
APIUtilityVerifier EqualityVerifier
/**
 * A buffer read on a zero-byte file must report EOF (-1) immediately.
 * @throws Throwable on any failure
 */
@Test public void testBlockReadZeroByteFile() throws Throwable {
  describe("do a block read on a 0 byte file");
  instream=getFileSystem().open(zeroByteFile);
  assertEquals(0,instream.getPos());
  byte[] single=new byte[1];
  // an empty file yields -1 on a buffer read, not 0 bytes
  assertMinusOne("block read zero byte file",instream.read(single,0,1));
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test ACL operations on a directory, including default ACLs.
 * General strategy is to use GETFILESTATUS and GETACLSTATUS to verify:
 *
 * - Initial status with no ACLs
 * - The addition of a default ACL
 * - The removal of default ACLs
 *
 * @throws Exception
 */
@Test @TestDir @TestJetty @TestHdfs public void testDirAcls() throws Exception {
  final String defUser1="default:user:glarch:r-x";
  final String defSpec1="aclspec=" + defUser1;
  final String dir="/aclDirTest";
  String statusJson;
  List aclEntries;
  createHttpFSServer(false);
  FileSystem fs=FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path(dir));
  // initial state: no aclBit in the file status, no ACL entries
  statusJson=getStatus(dir,"GETFILESTATUS");
  Assert.assertEquals(-1,statusJson.indexOf("aclBit"));
  statusJson=getStatus(dir,"GETACLSTATUS");
  aclEntries=getAclEntries(statusJson);
  // assertEquals reports the actual size on failure, unlike assertTrue(==)
  Assert.assertEquals(0,aclEntries.size());
  // add a default ACL: a single default entry expands to five entries
  putCmd(dir,"SETACL",defSpec1);
  statusJson=getStatus(dir,"GETFILESTATUS");
  Assert.assertNotEquals(-1,statusJson.indexOf("aclBit"));
  statusJson=getStatus(dir,"GETACLSTATUS");
  aclEntries=getAclEntries(statusJson);
  Assert.assertEquals(5,aclEntries.size());
  Assert.assertTrue(aclEntries.contains(defUser1));
  // removing the default ACLs restores the initial state
  putCmd(dir,"REMOVEDEFAULTACL",null);
  statusJson=getStatus(dir,"GETFILESTATUS");
  Assert.assertEquals(-1,statusJson.indexOf("aclBit"));
  statusJson=getStatus(dir,"GETACLSTATUS");
  aclEntries=getAclEntries(statusJson);
  Assert.assertEquals(0,aclEntries.size());
}
APIUtilityVerifier EqualityVerifier
/**
 * Verify a LISTSTATUS request with a glob filter ("f*") succeeds over the
 * HttpFS REST API.
 * @throws Exception
 */
@Test @TestDir @TestJetty @TestHdfs public void testGlobFilter() throws Exception {
  createHttpFSServer(false);
  FileSystem fs=FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path("/tmp"));
  fs.create(new Path("/tmp/foo.txt")).close();
  String user=HadoopUsersConfTestHelper.getHadoopUsers()[0];
  URL url=new URL(TestJettyHelper.getJettyURL(),MessageFormat.format("/webhdfs/v1/tmp?user.name={0}&op=liststatus&filter=f*",user));
  HttpURLConnection conn=(HttpURLConnection)url.openConnection();
  // expected value first: JUnit assertEquals is (expected, actual)
  Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
  BufferedReader reader=new BufferedReader(new InputStreamReader(conn.getInputStream()));
  reader.readLine();
  reader.close();
}
APIUtilityVerifier EqualityVerifier
/**
 * A PUT request without an {@code op} parameter must be rejected with
 * HTTP 400 (Bad Request).
 * @throws Exception
 */
@Test @TestDir @TestJetty @TestHdfs public void testPutNoOperation() throws Exception {
  createHttpFSServer(false);
  String user=HadoopUsersConfTestHelper.getHadoopUsers()[0];
  URL url=new URL(TestJettyHelper.getJettyURL(),MessageFormat.format("/webhdfs/v1/foo?user.name={0}",user));
  HttpURLConnection conn=(HttpURLConnection)url.openConnection();
  conn.setDoInput(true);
  conn.setDoOutput(true);
  conn.setRequestMethod("PUT");
  // expected value first: JUnit assertEquals is (expected, actual)
  Assert.assertEquals(HttpURLConnection.HTTP_BAD_REQUEST,conn.getResponseCode());
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * The instrumentation endpoint: anonymous/unknown users get 401, an
 * admin-group user gets a JSON counters payload, and the op is only valid
 * against the root path (400 elsewhere).
 * @throws Exception
 */
@Test @TestDir @TestJetty @TestHdfs public void instrumentation() throws Exception {
  createHttpFSServer(false);
  // "nobody" is not an admin: unauthorized
  URL url=new URL(TestJettyHelper.getJettyURL(),MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation","nobody"));
  HttpURLConnection conn=(HttpURLConnection)url.openConnection();
  // expected value first: JUnit assertEquals is (expected, actual)
  Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode());
  // a configured Hadoop user may read the instrumentation
  url=new URL(TestJettyHelper.getJettyURL(),MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation",HadoopUsersConfTestHelper.getHadoopUsers()[0]));
  conn=(HttpURLConnection)url.openConnection();
  Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
  BufferedReader reader=new BufferedReader(new InputStreamReader(conn.getInputStream()));
  String line=reader.readLine();
  reader.close();
  Assert.assertTrue(line.contains("\"counters\":{"));
  // instrumentation is only served from the root path
  url=new URL(TestJettyHelper.getJettyURL(),MessageFormat.format("/webhdfs/v1/foo?user.name={0}&op=instrumentation",HadoopUsersConfTestHelper.getHadoopUsers()[0]));
  conn=(HttpURLConnection)url.openConnection();
  Assert.assertEquals(HttpURLConnection.HTTP_BAD_REQUEST,conn.getResponseCode());
}
APIUtilityVerifier EqualityVerifier
/**
 * OPEN with offset=1&length=2 on a four-byte file must return exactly
 * bytes 1 and 2 followed by EOF.
 * @throws Exception
 */
@Test @TestDir @TestJetty @TestHdfs public void testOpenOffsetLength() throws Exception {
  createHttpFSServer(false);
  byte[] array=new byte[]{0,1,2,3};
  FileSystem fs=FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path("/tmp"));
  OutputStream os=fs.create(new Path("/tmp/foo"));
  os.write(array);
  os.close();
  String user=HadoopUsersConfTestHelper.getHadoopUsers()[0];
  URL url=new URL(TestJettyHelper.getJettyURL(),MessageFormat.format("/webhdfs/v1/tmp/foo?user.name={0}&op=open&offset=1&length=2",user));
  HttpURLConnection conn=(HttpURLConnection)url.openConnection();
  Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
  InputStream is=conn.getInputStream();
  try {
    Assert.assertEquals(1,is.read());
    Assert.assertEquals(2,is.read());
    Assert.assertEquals(-1,is.read());
  } finally {
    // the response stream was previously leaked; close it even on failure
    is.close();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * End-to-end delegation-token lifecycle over the HttpFS REST API:
 * reject anonymous access, authenticate via a signed cookie, fetch a
 * delegation token, use it, renew it (authenticated callers only),
 * cancel it, and verify the cancelled token is rejected.
 * @throws Exception
 */
@Test @TestDir @TestJetty @TestHdfs public void testDelegationTokenOperations() throws Exception {
createHttpFSServer(true);
// 1. anonymous request is rejected
URL url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETHOMEDIRECTORY");
HttpURLConnection conn=(HttpURLConnection)url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode());
// 2. forge a signed authentication cookie accepted by the handler
AuthenticationToken token=new AuthenticationToken("u","p",new KerberosDelegationTokenAuthenticationHandler().getType());
token.setExpires(System.currentTimeMillis() + 100000000);
Signer signer=new Signer(new StringSignerSecretProvider("secret"));
String tokenSigned=signer.sign(token.toString());
// 3. the signed cookie authenticates the same request
url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETHOMEDIRECTORY");
conn=(HttpURLConnection)url.openConnection();
conn.setRequestProperty("Cookie",AuthenticatedURL.AUTH_COOKIE + "=" + tokenSigned);
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
// 4. fetch a delegation token with the authenticated cookie
url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETDELEGATIONTOKEN");
conn=(HttpURLConnection)url.openConnection();
conn.setRequestProperty("Cookie",AuthenticatedURL.AUTH_COOKIE + "=" + tokenSigned);
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
JSONObject json=(JSONObject)new JSONParser().parse(new InputStreamReader(conn.getInputStream()));
json=(JSONObject)json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
String tokenStr=(String)json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
// 5. the delegation token alone authenticates a request
url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);
conn=(HttpURLConnection)url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
// 6. renewal without authentication is rejected...
url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
conn=(HttpURLConnection)url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode());
// 7. ...but succeeds with the authenticated cookie
url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
conn=(HttpURLConnection)url.openConnection();
conn.setRequestMethod("PUT");
conn.setRequestProperty("Cookie",AuthenticatedURL.AUTH_COOKIE + "=" + tokenSigned);
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
// 8. cancellation requires no authentication
url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=CANCELDELEGATIONTOKEN&token=" + tokenStr);
conn=(HttpURLConnection)url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
// 9. the cancelled token is now rejected
url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);
conn=(HttpURLConnection)url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,conn.getResponseCode());
}
APIUtilityVerifier EqualityVerifier
/**
 * Validate XAttr get/set/remove calls over the HttpFS REST API:
 * start with none, add two, remove them one at a time, and verify the
 * remaining set after each step via GETXATTRS.
 */
@Test @TestDir @TestJetty @TestHdfs public void testXAttrs() throws Exception {
final String name1="user.a1";
final byte[] value1=new byte[]{0x31,0x32,0x33};
final String name2="user.a2";
final byte[] value2=new byte[]{0x41,0x42,0x43};
final String dir="/xattrTest";
final String path=dir + "/file";
createHttpFSServer(false);
FileSystem fs=FileSystem.get(TestHdfsHelper.getHdfsConf());
fs.mkdirs(new Path(dir));
createWithHttp(path,null);
// a fresh file carries no xattrs
String statusJson=getStatus(path,"GETXATTRS");
Map xAttrs=getXAttrs(statusJson);
Assert.assertEquals(0,xAttrs.size());
// set both attributes and verify names and byte values round-trip
putCmd(path,"SETXATTR",setXAttrParam(name1,value1));
putCmd(path,"SETXATTR",setXAttrParam(name2,value2));
statusJson=getStatus(path,"GETXATTRS");
xAttrs=getXAttrs(statusJson);
Assert.assertEquals(2,xAttrs.size());
Assert.assertArrayEquals(value1,xAttrs.get(name1));
Assert.assertArrayEquals(value2,xAttrs.get(name2));
// removing the first attribute leaves only the second
putCmd(path,"REMOVEXATTR","xattr.name=" + name1);
statusJson=getStatus(path,"GETXATTRS");
xAttrs=getXAttrs(statusJson);
Assert.assertEquals(1,xAttrs.size());
Assert.assertArrayEquals(value2,xAttrs.get(name2));
// removing the second restores the empty state
putCmd(path,"REMOVEXATTR","xattr.name=" + name2);
statusJson=getStatus(path,"GETXATTRS");
xAttrs=getXAttrs(statusJson);
Assert.assertEquals(0,xAttrs.size());
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Validate the various ACL set/modify/remove calls. General strategy is
 * to verify each of the following steps with GETFILESTATUS, LISTSTATUS,
 * and GETACLSTATUS:
 *
 * - Create a file with no ACLs
 * - Add a user + group ACL
 * - Add another user ACL
 * - Remove the first user ACL
 * - Remove all ACLs
 *
 */
@Test @TestDir @TestJetty @TestHdfs public void testFileAcls() throws Exception {
  final String aclUser1="user:foo:rw-";
  final String aclUser2="user:bar:r--";
  final String aclGroup1="group::r--";
  final String aclSpec="aclspec=user::rwx," + aclUser1 + ","+ aclGroup1+ ",other::---";
  final String modAclSpec="aclspec=" + aclUser2;
  final String remAclSpec="aclspec=" + aclUser1;
  final String dir="/aclFileTest";
  final String path=dir + "/test";
  String statusJson;
  List aclEntries;
  createHttpFSServer(false);
  FileSystem fs=FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path(dir));
  createWithHttp(path,null);
  // initial state: no aclBit anywhere, no ACL entries
  statusJson=getStatus(path,"GETFILESTATUS");
  Assert.assertEquals(-1,statusJson.indexOf("aclBit"));
  statusJson=getStatus(dir,"LISTSTATUS");
  Assert.assertEquals(-1,statusJson.indexOf("aclBit"));
  statusJson=getStatus(path,"GETACLSTATUS");
  aclEntries=getAclEntries(statusJson);
  // assertEquals reports the actual size on failure, unlike assertTrue(==)
  Assert.assertEquals(0,aclEntries.size());
  // SETACL: only the named user and group entries are reported back
  putCmd(path,"SETACL",aclSpec);
  statusJson=getStatus(path,"GETFILESTATUS");
  Assert.assertNotEquals(-1,statusJson.indexOf("aclBit"));
  statusJson=getStatus(dir,"LISTSTATUS");
  Assert.assertNotEquals(-1,statusJson.indexOf("aclBit"));
  statusJson=getStatus(path,"GETACLSTATUS");
  aclEntries=getAclEntries(statusJson);
  Assert.assertEquals(2,aclEntries.size());
  Assert.assertTrue(aclEntries.contains(aclUser1));
  Assert.assertTrue(aclEntries.contains(aclGroup1));
  // MODIFYACLENTRIES adds a second user entry
  putCmd(path,"MODIFYACLENTRIES",modAclSpec);
  statusJson=getStatus(path,"GETACLSTATUS");
  aclEntries=getAclEntries(statusJson);
  Assert.assertEquals(3,aclEntries.size());
  Assert.assertTrue(aclEntries.contains(aclUser1));
  Assert.assertTrue(aclEntries.contains(aclUser2));
  Assert.assertTrue(aclEntries.contains(aclGroup1));
  // REMOVEACLENTRIES drops only the first user entry
  putCmd(path,"REMOVEACLENTRIES",remAclSpec);
  statusJson=getStatus(path,"GETACLSTATUS");
  aclEntries=getAclEntries(statusJson);
  Assert.assertEquals(2,aclEntries.size());
  Assert.assertTrue(aclEntries.contains(aclUser2));
  Assert.assertTrue(aclEntries.contains(aclGroup1));
  // REMOVEACL restores the original no-ACL state everywhere
  putCmd(path,"REMOVEACL",null);
  statusJson=getStatus(path,"GETACLSTATUS");
  aclEntries=getAclEntries(statusJson);
  Assert.assertEquals(0,aclEntries.size());
  statusJson=getStatus(path,"GETFILESTATUS");
  Assert.assertEquals(-1,statusJson.indexOf("aclBit"));
  statusJson=getStatus(dir,"LISTSTATUS");
  Assert.assertEquals(-1,statusJson.indexOf("aclBit"));
}
APIUtilityVerifier EqualityVerifier
/**
 * A configured Hadoop user can run LISTSTATUS against the HttpFS root.
 * @throws Exception
 */
@Test @TestDir @TestJetty @TestHdfs public void testHdfsAccess() throws Exception {
  createHttpFSServer(false);
  String user=HadoopUsersConfTestHelper.getHadoopUsers()[0];
  URL url=new URL(TestJettyHelper.getJettyURL(),MessageFormat.format("/webhdfs/v1/?user.name={0}&op=liststatus",user));
  HttpURLConnection conn=(HttpURLConnection)url.openConnection();
  // expected value first: JUnit assertEquals is (expected, actual)
  Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
  BufferedReader reader=new BufferedReader(new InputStreamReader(conn.getInputStream()));
  reader.readLine();
  reader.close();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Kerberos-authenticated delegation-token lifecycle: fetch a token as the
 * client principal, use it unauthenticated, renew it (authenticated only),
 * cancel it, and verify the cancelled token is rejected.
 * @throws Exception
 */
@Test @TestDir @TestJetty @TestHdfs public void testDelegationTokenHttpFSAccess() throws Exception {
  createHttpFSServer();
  KerberosTestUtils.doAsClient(new Callable(){
    @Override public Void call() throws Exception {
      // fetch a delegation token via SPNEGO-authenticated request
      URL url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETDELEGATIONTOKEN");
      AuthenticatedURL aUrl=new AuthenticatedURL();
      AuthenticatedURL.Token aToken=new AuthenticatedURL.Token();
      HttpURLConnection conn=aUrl.openConnection(url,aToken);
      // expected value first: JUnit assertEquals is (expected, actual)
      Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
      JSONObject json=(JSONObject)new JSONParser().parse(new InputStreamReader(conn.getInputStream()));
      json=(JSONObject)json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
      String tokenStr=(String)json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
      // the token alone authenticates a plain (non-SPNEGO) request
      url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);
      conn=(HttpURLConnection)url.openConnection();
      Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
      // renewal without authentication must be rejected...
      url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
      conn=(HttpURLConnection)url.openConnection();
      conn.setRequestMethod("PUT");
      Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode());
      // ...but succeeds over the authenticated connection
      url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
      conn=aUrl.openConnection(url,aToken);
      conn.setRequestMethod("PUT");
      Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
      // cancellation needs no authentication
      url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=CANCELDELEGATIONTOKEN&token=" + tokenStr);
      conn=(HttpURLConnection)url.openConnection();
      conn.setRequestMethod("PUT");
      Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
      // the cancelled token is now rejected
      url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);
      conn=(HttpURLConnection)url.openConnection();
      Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode());
      return null;
    }
  }
  );
}
APIUtilityVerifier EqualityVerifier
@Test @TestDir @TestJetty @TestHdfs public void testInvalidadHttpFSAccess() throws Exception {
  createHttpFSServer();
  // An unauthenticated request (no SPNEGO, no delegation token) must get 401.
  URL url = new URL(TestJettyHelper.getJettyURL(), "/webhdfs/v1/?op=GETHOMEDIRECTORY");
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  // JUnit convention: expected value first (original had the order reversed).
  Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test if the structure generator works fine: a valid invocation produces the
 * expected structure files, and each invalid option value is rejected.
 */
@Test public void testStructureGenerator() throws Exception {
  StructureGenerator sg = new StructureGenerator();
  String[] args = new String[]{"-maxDepth", "2", "-minWidth", "1", "-maxWidth", "2",
      "-numOfFiles", "2", "-avgFileSize", "1",
      "-outDir", OUT_DIR.getAbsolutePath(), "-seed", "1"};
  // Indices of the option *values* inside {@code args}.
  final int MAX_DEPTH = 1;
  final int MIN_WIDTH = 3;
  final int MAX_WIDTH = 5;
  final int NUM_OF_FILES = 7;
  final int AVG_FILE_SIZE = 9;
  final int SEED = 13;
  try {
    // A valid invocation succeeds and writes both structure files.
    assertEquals(0, sg.run(args));
    // try-with-resources closes the readers even if an assertion fails
    // (the original leaked them on failure).
    try (BufferedReader in = new BufferedReader(new FileReader(DIR_STRUCTURE_FILE))) {
      assertEquals(DIR_STRUCTURE_FIRST_LINE, in.readLine());
      assertEquals(DIR_STRUCTURE_SECOND_LINE, in.readLine());
      assertEquals(null, in.readLine());
    }
    try (BufferedReader in = new BufferedReader(new FileReader(FILE_STRUCTURE_FILE))) {
      assertEquals(FILE_STRUCTURE_FIRST_LINE, in.readLine());
      assertEquals(FILE_STRUCTURE_SECOND_LINE, in.readLine());
      assertEquals(null, in.readLine());
    }
    // Each invalid option value must make run() fail with -1.
    // (The original tested NUM_OF_FILES twice; the duplicate is removed.)
    assertRunRejects(sg, args, MAX_DEPTH, "0");
    assertRunRejects(sg, args, MIN_WIDTH, "-1");
    assertRunRejects(sg, args, MAX_WIDTH, "-1");
    assertRunRejects(sg, args, NUM_OF_FILES, "-1");
    assertRunRejects(sg, args, AVG_FILE_SIZE, "-1");
    assertRunRejects(sg, args, SEED, "34.d4");
  } finally {
    DIR_STRUCTURE_FILE.delete();
    FILE_STRUCTURE_FILE.delete();
  }
}

/**
 * Replaces {@code args[index]} with {@code badValue}, asserts that
 * {@link StructureGenerator#run} rejects it with -1, then restores the
 * original value so later checks see a valid baseline.
 */
private void assertRunRejects(StructureGenerator sg, String[] args, int index,
    String badValue) throws Exception {
  String oldArg = args[index];
  args[index] = badValue;
  assertEquals(-1, sg.run(args));
  args[index] = oldArg;
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test if the load generator works fine: valid invocations succeed, each
 * invalid option value is rejected, and script-driven runs behave the same.
 */
@Test public void testLoadGenerator() throws Exception {
  final String TEST_SPACE_ROOT = "/test";
  final String SCRIPT_TEST_DIR = OUT_DIR.getAbsolutePath();
  String script = SCRIPT_TEST_DIR + "/" + "loadgenscript";
  String script2 = SCRIPT_TEST_DIR + "/" + "loadgenscript2";
  File scriptFile1 = new File(script);
  File scriptFile2 = new File(script2);
  // Write the structure descriptions consumed by DataGenerator.
  FileWriter writer = new FileWriter(DIR_STRUCTURE_FILE);
  writer.write(DIR_STRUCTURE_FIRST_LINE + "\n");
  writer.write(DIR_STRUCTURE_SECOND_LINE + "\n");
  writer.close();
  writer = new FileWriter(FILE_STRUCTURE_FILE);
  writer.write(FILE_STRUCTURE_FIRST_LINE + "\n");
  writer.write(FILE_STRUCTURE_SECOND_LINE + "\n");
  writer.close();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(3).build();
  cluster.waitActive();
  try {
    // Populate the test space.
    DataGenerator dg = new DataGenerator();
    dg.setConf(CONF);
    String[] args = new String[]{"-inDir", OUT_DIR.getAbsolutePath(), "-root", TEST_SPACE_ROOT};
    assertEquals(0, dg.run(args));
    // Indices of the option *values* inside the LoadGenerator args array.
    final int READ_PROBABILITY = 1;
    final int WRITE_PROBABILITY = 3;
    final int MAX_DELAY_BETWEEN_OPS = 7;
    final int NUM_OF_THREADS = 9;
    final int START_TIME = 11;
    final int ELAPSED_TIME = 13;
    LoadGenerator lg = new LoadGenerator();
    lg.setConf(CONF);
    args = new String[]{"-readProbability", "0.3", "-writeProbability", "0.3", "-root", TEST_SPACE_ROOT, "-maxDelayBetweenOps", "0", "-numOfThreads", "1", "-startTime", Long.toString(Time.now()), "-elapsedTime", "10"};
    assertEquals(0, lg.run(args));
    // Probabilities outside [0,1] are rejected.
    String oldArg = args[READ_PROBABILITY];
    args[READ_PROBABILITY] = "1.1";
    assertEquals(-1, lg.run(args));
    args[READ_PROBABILITY] = "-1.1";
    assertEquals(-1, lg.run(args));
    args[READ_PROBABILITY] = oldArg;
    oldArg = args[WRITE_PROBABILITY];
    args[WRITE_PROBABILITY] = "1.1";
    assertEquals(-1, lg.run(args));
    args[WRITE_PROBABILITY] = "-1.1";
    assertEquals(-1, lg.run(args));
    // read + write probability > 1 is also invalid.
    args[WRITE_PROBABILITY] = "0.9";
    assertEquals(-1, lg.run(args));
    // BUGFIX: restore the *write* probability. The original restored the
    // unchanged read probability, leaving writeProbability at 0.9 so every
    // subsequent negative check returned -1 for the wrong reason.
    args[WRITE_PROBABILITY] = oldArg;
    // Non-numeric delay is rejected.
    // (The original repeated this exact check twice; the duplicate is removed.)
    oldArg = args[MAX_DELAY_BETWEEN_OPS];
    args[MAX_DELAY_BETWEEN_OPS] = "1.x1";
    assertEquals(-1, lg.run(args));
    args[MAX_DELAY_BETWEEN_OPS] = oldArg;
    // Negative thread count, start time and elapsed time are rejected.
    oldArg = args[NUM_OF_THREADS];
    args[NUM_OF_THREADS] = "-1";
    assertEquals(-1, lg.run(args));
    args[NUM_OF_THREADS] = oldArg;
    oldArg = args[START_TIME];
    args[START_TIME] = "-1";
    assertEquals(-1, lg.run(args));
    args[START_TIME] = oldArg;
    oldArg = args[ELAPSED_TIME];
    args[ELAPSED_TIME] = "-1";
    assertEquals(-1, lg.run(args));
    args[ELAPSED_TIME] = oldArg;
    // A well-formed script file drives a successful run...
    FileWriter fw = new FileWriter(scriptFile1);
    fw.write("2 .22 .33\n");
    fw.write("3 .10 .6\n");
    fw.write("6 0 .7\n");
    fw.close();
    String[] scriptArgs = new String[]{"-root", TEST_SPACE_ROOT, "-maxDelayBetweenOps", "0", "-numOfThreads", "10", "-startTime", Long.toString(Time.now()), "-scriptFile", script};
    assertEquals(0, lg.run(scriptArgs));
    // ...while a malformed one is rejected.
    fw = new FileWriter(scriptFile2);
    fw.write("2 .22 .33\n");
    fw.write("3 blah blah blah .6\n");
    fw.write("6 0 .7\n");
    fw.close();
    scriptArgs[scriptArgs.length - 1] = script2;
    assertEquals(-1, lg.run(scriptArgs));
  } finally {
    cluster.shutdown();
    DIR_STRUCTURE_FILE.delete();
    FILE_STRUCTURE_FILE.delete();
    scriptFile1.delete();
    scriptFile2.delete();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests whether binary Avro data files are displayed correctly.
 */
@Test public void testDisplayForAvroFiles() throws Exception {
  createAvroFile(generateWeatherAvroBinaryData());
  Configuration conf = fs.getConf();
  PathData pathData = new PathData(AVRO_FILENAME.toString(), conf);
  Display.Text text = new Display.Text();
  text.setConf(conf);
  // Reach Display.Text's private getInputStream() via reflection.
  Method getInputStream = text.getClass().getDeclaredMethod("getInputStream", PathData.class);
  getInputStream.setAccessible(true);
  InputStream stream = (InputStream) getInputStream.invoke(text, pathData);
  // Each Avro record should be rendered as one JSON line.
  final String eol = System.getProperty("line.separator");
  String expectedOutput =
      "{\"station\":\"011990-99999\",\"time\":-619524000000,\"temp\":0}" + eol
      + "{\"station\":\"011990-99999\",\"time\":-619506000000,\"temp\":22}" + eol
      + "{\"station\":\"011990-99999\",\"time\":-619484400000,\"temp\":-11}" + eol
      + "{\"station\":\"012650-99999\",\"time\":-655531200000,\"temp\":111}" + eol
      + "{\"station\":\"012650-99999\",\"time\":-655509600000,\"temp\":78}" + eol;
  assertEquals(expectedOutput, inputStreamToString(stream));
}
APIUtilityVerifier EqualityVerifier
@Test(timeout=30000) public void testRelativeGlob() throws Exception {
  // Expand a glob relative to the working directory; both files must match.
  final PathData[] matches = PathData.expandAsGlob("d1/f1*", conf);
  assertEquals(sortedString("d1/f1", "d1/f1.1"), sortedString(matches));
}
APIUtilityVerifier EqualityVerifier
@Test(timeout=30000) public void testRelativeGlobBack() throws Exception {
  // From inside d1, a "../" glob must resolve against the parent directory.
  fs.setWorkingDirectory(new Path("d1"));
  final PathData[] matches = PathData.expandAsGlob("../d2/*", conf);
  assertEquals(sortedString("../d2/f3"), sortedString(matches));
}
APIUtilityVerifier EqualityVerifier
@Test(timeout=30000) public void testAbsoluteGlob() throws Exception {
  // Absolute glob with a wildcard matches both files.
  PathData[] matches = PathData.expandAsGlob(testDir + "/d1/f1*", conf);
  assertEquals(sortedString(testDir + "/d1/f1", testDir + "/d1/f1.1"), sortedString(matches));
  // On Windows, strip the drive letter to exercise drive-relative paths.
  String driveRelative = testDir + "/d1/f1";
  if (Shell.WINDOWS) {
    driveRelative = driveRelative.substring(2);
  }
  matches = PathData.expandAsGlob(driveRelative, conf);
  assertEquals(sortedString(driveRelative), sortedString(matches));
  // "." expands to itself.
  matches = PathData.expandAsGlob(".", conf);
  assertEquals(sortedString("."), sortedString(matches));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests whether binary Avro data files are displayed correctly.
 */
@Test(timeout=30000) public void testDisplayForAvroFiles() throws Exception {
  createAvroFile(generateWeatherAvroBinaryData());
  Configuration conf = new Configuration();
  PathData pathData = new PathData(new URI(AVRO_FILENAME), conf);
  Display.Text text = new Display.Text();
  text.setConf(conf);
  // Reach Display.Text's private getInputStream() via reflection.
  Method getInputStream = text.getClass().getDeclaredMethod("getInputStream", PathData.class);
  getInputStream.setAccessible(true);
  InputStream stream = (InputStream) getInputStream.invoke(text, pathData);
  // Each Avro record should be rendered as one JSON line.
  final String eol = System.getProperty("line.separator");
  String expectedOutput =
      "{\"station\":\"011990-99999\",\"time\":-619524000000,\"temp\":0}" + eol
      + "{\"station\":\"011990-99999\",\"time\":-619506000000,\"temp\":22}" + eol
      + "{\"station\":\"011990-99999\",\"time\":-619484400000,\"temp\":-11}" + eol
      + "{\"station\":\"012650-99999\",\"time\":-655531200000,\"temp\":111}" + eol
      + "{\"station\":\"012650-99999\",\"time\":-655509600000,\"temp\":78}" + eol;
  assertEquals(expectedOutput, inputStreamToString(stream));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testMRFlow() throws Exception {
  // Run the Slive MR job end to end and check it reports success.
  ConfigExtractor extractor = getTestConfig(false);
  SliveTest s = new SliveTest(getBaseConfig());
  int ec = ToolRunner.run(s, getTestArgs(false));
  // assertEquals gives a far better failure message than assertTrue(ec == 0).
  assertEquals("Slive returned a non-zero exit code", 0, ec);
  // The job must have produced its result file.
  String resFile = extractor.getResultFile();
  File fn = new File(resFile);
  assertTrue("Result file " + fn + " does not exist", fn.exists());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Seek past the buffer then read
 * @throws Throwable problems
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekAndReadPastEndOfFile() throws Throwable {
  instream = fs.open(readFile);
  assertEquals(0, instream.getPos());
  // Land on the second-to-last byte: two reads succeed, the third hits EOF.
  instream.seek(SEEK_FILE_LEN - 2);
  int penultimate = instream.read();
  assertTrue("Premature EOF", penultimate != -1);
  int last = instream.read();
  assertTrue("Premature EOF", last != -1);
  assertMinusOne("read past end of file", instream.read());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Seek past the buffer and attempt a read(buffer)
 * @throws Throwable failures
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekBulkReadPastEndOfFile() throws Throwable {
  instream = fs.open(readFile);
  assertEquals(0, instream.getPos());
  // Land on the final byte of the file.
  instream.seek(SEEK_FILE_LEN - 1);
  final byte[] single = new byte[1];
  // First bulk read consumes that last byte; its result is left unchecked.
  int outcome = instream.read(single, 0, 1);
  // Every bulk read past the end must report EOF...
  outcome = instream.read(single, 0, 1);
  assertMinusOne("read past end of file", outcome);
  outcome = instream.read(single, 0, 1);
  assertMinusOne("read past end of file", outcome);
  // ...but a zero-length read returns 0 before any EOF check.
  outcome = instream.read(single, 0, 0);
  assertEquals("EOF checks coming before read range check", 0, outcome);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekAndPastEndOfFileThenReseekAndRead() throws Throwable {
  instream = fs.open(smallSeekFile);
  try {
    // Seeking to EOF may either throw eagerly or defer failure to read().
    instream.seek(SMALL_SEEK_FILE_LEN);
    assertMinusOne("read after seeking past EOF", instream.read());
  } catch (EOFException expected) {
    // an implementation that rejects the seek itself is also acceptable
  }
  // A subsequent in-range seek must fully recover the stream.
  instream.seek(1);
  assertTrue("Premature EOF", instream.read() != -1);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testPositionedBulkReadDoesntChangePosition() throws Throwable {
  Path seekFile = new Path(testPath, "bigseekfile.txt");
  byte[] dataset = SwiftTestUtils.dataset(65536, 0, 255);
  createFile(seekFile, dataset);
  instream = fs.open(seekFile);
  // Move to an arbitrary offset and consume one byte.
  instream.seek(39999);
  assertTrue(-1 != instream.read());
  assertEquals(40000, instream.getPos());
  // A positioned read must not move the stream's own position.
  byte[] scratch = new byte[256];
  instream.read(128, scratch, 0, scratch.length);
  assertEquals(40000, instream.getPos());
  assertEquals("@40000", dataset[40000], (byte) instream.read());
  // The positioned read must have returned the bytes starting at offset 128.
  for (int i = 0; i < 256; i++) {
    assertEquals("@" + i, dataset[i + 128], scratch[i]);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekAndReadPastEndOfFile() throws Throwable {
  instream = fs.open(smallSeekFile);
  assertEquals(0, instream.getPos());
  // Land on the second-to-last byte: two reads succeed, the third hits EOF.
  instream.seek(SMALL_SEEK_FILE_LEN - 2);
  int penultimate = instream.read();
  assertTrue("Premature EOF", penultimate != -1);
  int last = instream.read();
  assertTrue("Premature EOF", last != -1);
  assertMinusOne("read past end of file", instream.read());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testNegativeSeek() throws Throwable {
  instream = fs.open(smallSeekFile);
  assertEquals(0, instream.getPos());
  try {
    // A negative offset must be rejected with an IOException.
    instream.seek(-1);
    long p = instream.getPos();
    LOG.warn("Seek to -1 returned a position of " + p);
    int result = instream.read();
    fail("expected an exception, got data " + result + " at a position of " + p);
  } catch (IOException e) {
    // expected: negative offsets are invalid
  }
  // The failed seek must leave the position untouched.
  assertEquals(0, instream.getPos());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekBigFile() throws Throwable {
  Path seekFile = new Path(testPath, "bigseekfile.txt");
  byte[] dataset = SwiftTestUtils.dataset(65536, 0, 255);
  createFile(seekFile, dataset);
  instream = fs.open(seekFile);
  assertEquals(0, instream.getPos());
  // Sequential reads from the start.
  instream.seek(0);
  assertEquals(0, instream.read());
  assertEquals(1, instream.read());
  assertEquals(2, instream.read());
  // Forward seeks into the dataset.
  instream.seek(32768);
  assertEquals("@32768", dataset[32768], (byte) instream.read());
  instream.seek(40000);
  assertEquals("@40000", dataset[40000], (byte) instream.read());
  // Backward seeks.
  instream.seek(8191);
  assertEquals("@8191", dataset[8191], (byte) instream.read());
  instream.seek(0);
  assertEquals("@0", 0, (byte) instream.read());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekFile() throws Throwable {
  instream = fs.open(smallSeekFile);
  assertEquals(0, instream.getPos());
  // Reads advance the position one byte at a time.
  instream.seek(0);
  int firstByte = instream.read();
  assertEquals(0, firstByte);
  assertEquals(1, instream.read());
  assertEquals(2, instream.getPos());
  assertEquals(2, instream.read());
  assertEquals(3, instream.getPos());
  // Forward seek, then read the byte at that offset.
  instream.seek(128);
  assertEquals(128, instream.getPos());
  assertEquals(128, instream.read());
  // Backward seek works too.
  instream.seek(63);
  assertEquals(63, instream.read());
}
APIUtilityVerifier EqualityVerifier
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekZeroByteFile() throws Throwable {
  instream = fs.open(zeroByteFile);
  assertEquals(0, instream.getPos());
  // An empty file reports EOF immediately.
  assertMinusOne("initial byte read", instream.read());
  byte[] scratch = new byte[1];
  // Seeking to 0 is legal even on an empty file; reads still hit EOF.
  instream.seek(0);
  assertMinusOne("post-seek byte read", instream.read());
  assertMinusOne("post-seek buffer read", instream.read(scratch, 0, 1));
}
APIUtilityVerifier EqualityVerifier
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testBlockReadZeroByteFile() throws Throwable {
  instream = fs.open(zeroByteFile);
  assertEquals(0, instream.getPos());
  // A bulk read on an empty file must report EOF, not 0 bytes.
  byte[] scratch = new byte[1];
  assertMinusOne("block read zero byte file", instream.read(scratch, 0, 1));
}
APIUtilityVerifier BooleanVerifier
@Test public void testLocationAwareFalsePropagates() throws Exception {
  // Explicitly disabling location awareness must be visible on the client.
  final Configuration config = createCoreConfig();
  set(config, DOT_LOCATION_AWARE, "false");
  SwiftRestClient client = mkInstance(config);
  assertFalse(client.isLocationAware());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testProxyData() throws Exception {
  final Configuration config = createCoreConfig();
  final String proxyHost = "web-proxy";
  final int proxyPort = 8088;
  config.set(SWIFT_PROXY_HOST_PROPERTY, proxyHost);
  config.set(SWIFT_PROXY_PORT_PROPERTY, Integer.toString(proxyPort));
  // Both proxy settings must round-trip into the REST client.
  SwiftRestClient client = mkInstance(config);
  assertEquals(proxyHost, client.getProxyHost());
  assertEquals(proxyPort, client.getProxyPort());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testPositivePartsize() throws Exception {
  final Configuration config = createCoreConfig();
  final int partSizeKB = 127;
  config.set(SWIFT_PARTITION_SIZE, Integer.toString(partSizeKB));
  // The configured partition size must round-trip into the REST client.
  SwiftRestClient client = mkInstance(config);
  assertEquals(partSizeKB, client.getPartSizeKB());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testPositiveBlocksize() throws Exception {
  final Configuration config = createCoreConfig();
  final int blocksizeKB = 127;
  config.set(SWIFT_BLOCKSIZE, Integer.toString(blocksizeKB));
  // The configured blocksize must round-trip into the REST client.
  SwiftRestClient client = mkInstance(config);
  assertEquals(blocksizeKB, client.getBlocksizeKB());
}
APIUtilityVerifier BooleanVerifier
@Test public void testLocationAwareTruePropagates() throws Exception {
  // Enabling location awareness must be visible on the client.
  final Configuration config = createCoreConfig();
  set(config, DOT_LOCATION_AWARE, "true");
  SwiftRestClient client = mkInstance(config);
  assertTrue(client.isLocationAware());
}
APIUtilityVerifier UtilityVerifier
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testLongObjectNamesForbidden() throws Throwable {
  // Build a ~1200-character hex path, beyond the store's object-name limit.
  StringBuilder name = new StringBuilder(1200);
  name.append("/");
  for (int i = 0; i < (1200 / 4); i++) {
    name.append(String.format("%04x", i));
  }
  String pathString = name.toString();
  Path path = new Path(pathString);
  try {
    writeTextFile(fs, path, pathString, true);
    // If the create somehow succeeded, clean up before failing the test.
    fs.delete(path, false);
    fail("Managed to create an object with a name of length " + pathString.length());
  } catch (SwiftBadRequestException e) {
    // expected: the store rejects over-long object names
  }
}
APIUtilityVerifier EqualityVerifier
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testOverwrite() throws Throwable {
  Path path = new Path("/test/Overwrite");
  try {
    // Initial write.
    String text = "Testing a put to a file " + System.currentTimeMillis();
    writeTextFile(fs, path, text, false);
    assertFileHasLength(fs, path, text.length());
    // Overwrite with different content; length and bytes must both update.
    String text2 = "Overwriting a file " + System.currentTimeMillis();
    writeTextFile(fs, path, text2, true);
    assertFileHasLength(fs, path, text2.length());
    String result = readBytesToString(fs, path, text2.length());
    assertEquals(text2, result);
  } finally {
    delete(fs, path);
  }
}
APIUtilityVerifier EqualityVerifier
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testPutGetFile() throws Throwable {
  Path path = new Path("/test/PutGetFile");
  try {
    // Round-trip: what is written must be read back verbatim.
    String text = "Testing a put and get to a file " + System.currentTimeMillis();
    writeTextFile(fs, path, text, false);
    assertEquals(text, readBytesToString(fs, path, text.length()));
  } finally {
    delete(fs, path);
  }
}
APIUtilityVerifier UtilityVerifier
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testLsNonExistentFile() throws Exception {
  Path missing = new Path("/test/hadoop/file");
  try {
    // Listing a path that does not exist must throw, not return statuses.
    FileStatus[] statuses = fs.listStatus(missing);
    fail("Should throw FileNotFoundException on " + missing + " but got list of length " + statuses.length);
  } catch (FileNotFoundException fnfe) {
    // expected
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verify that a file's status reports a non-zero block size and a
 * non-zero replication factor.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testBlocksizeNonZeroForFile() throws Throwable {
Path smallfile=new Path("/test/smallfile");
SwiftTestUtils.writeTextFile(fs,smallfile,"blocksize",true);
// NOTE(review): the file is written above and then created again here —
// presumably to exercise overwrite; confirm the double create is intended.
createFile(smallfile);
FileStatus status=getFs().getFileStatus(smallfile);
// Both values must be non-zero for downstream split/placement logic.
assertTrue("Zero blocksize in " + status,status.getBlockSize() != 0L);
assertTrue("Zero replication in " + status,status.getReplication() != 0L);
}
APIUtilityVerifier BooleanVerifier
/**
 * Asserts that a multi-byte file has a status of file and not
 * directory or symlink. (The previous javadoc wrongly said "zero byte file".)
 * @throws Exception on failures
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testMultiByteFilesAreFiles() throws Exception {
Path src=path("/test/testMultiByteFilesAreFiles");
SwiftTestUtils.writeTextFile(fs,src,"testMultiByteFilesAreFiles",false);
assertIsFile(src);
FileStatus status=fs.getFileStatus(src);
assertFalse(status.isDir());
}
APIUtilityVerifier UtilityVerifier
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testNoStatusForMissingDirectories() throws Throwable {
  Path missing = path("/test/testNoStatusForMissingDirectories");
  // Guard against leftovers from earlier test runs.
  assertPathDoesNotExist("leftover?", missing);
  try {
    FileStatus[] statuses = fs.listStatus(missing);
    // Report the entry count: the original concatenated the array itself,
    // which prints a useless Object toString().
    fail("Expected a FileNotFoundException, got " + statuses.length + " file statuses");
  } catch (FileNotFoundException expected) {
    // expected
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * test that a dir off root has a listStatus() call that
 * works as expected. and that when a child is added. it changes
 * @throws Exception on failures
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testDirectoriesOffRootHaveMatchingFileStatus() throws Exception {
  Path test = path("/test");
  // Start from a clean slate.
  fs.delete(test, true);
  mkdirs(test);
  assertExists("created test directory", test);
  // A freshly created directory lists as empty.
  FileStatus[] statuses = fs.listStatus(test);
  String listing = statusToString(test.toString(), statuses);
  assertEquals("Wrong number of elements in file status " + listing, 0, statuses.length);
  // Adding a child changes the listing.
  Path child = path("/test/file");
  SwiftTestUtils.touch(fs, child);
  statuses = fs.listStatus(test);
  listing = statusToString(test.toString(), statuses);
  assertEquals("Wrong number of elements in file status " + listing, 1, statuses.length);
  SwiftFileStatus stat = (SwiftFileStatus) statuses[0];
  assertTrue("isDir(): Not a directory: " + stat, stat.isDir());
  extraStatusAssertions(stat);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * test that a dir two levels down has a listStatus() call that
 * works as expected.
 * @throws Exception on failures
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testDirectoriesLowerDownHaveMatchingFileStatus() throws Exception {
  Path subdir = path("/test/testDirectoriesLowerDownHaveMatchingFileStatus");
  // Start from a clean slate, then create the nested directory.
  fs.delete(subdir, true);
  mkdirs(subdir);
  assertExists("created test sub directory", subdir);
  // A freshly created directory must list as empty.
  FileStatus[] statuses = fs.listStatus(subdir);
  String listing = statusToString(subdir.toString(), statuses);
  assertEquals("Wrong number of elements in file status " + listing, 0, statuses.length);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testWriteReadFile() throws Exception {
  final Path f = new Path("/test/test");
  final String message = "Test string";
  // Write the message.
  final FSDataOutputStream fsDataOutputStream = fs.create(f);
  fsDataOutputStream.write(message.getBytes());
  fsDataOutputStream.close();
  assertExists("created file", f);
  FSDataInputStream open = null;
  try {
    open = fs.open(f);
    final byte[] bytes = new byte[512];
    final int read = open.read(bytes);
    // Guard: read() returns -1 at EOF, which previously produced an opaque
    // NegativeArraySizeException instead of a clear assertion failure.
    assertTrue("no data read from " + f, read > 0);
    // Compare only the bytes actually read (replaces the manual arraycopy).
    assertEquals(message, new String(bytes, 0, read));
  } finally {
    fs.delete(f, false);
    IOUtils.closeStream(open);
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Assert that a filesystem is case sensitive.
 * This is done by creating a mixed-case filename and asserting that
 * its lower case version is not there.
 * @throws Exception failures
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testFilesystemIsCaseSensitive() throws Exception {
  String mixedCaseFilename = "/test/UPPER.TXT";
  Path upper = path(mixedCaseFilename);
  Path lower = path(mixedCaseFilename.toLowerCase(Locale.ENGLISH));
  // Neither variant may exist yet.
  assertFalse("File exists" + upper, fs.exists(upper));
  assertFalse("File exists" + lower, fs.exists(lower));
  // Create only the upper-case file.
  FSDataOutputStream out = fs.create(upper);
  out.writeUTF("UPPER");
  out.close();
  FileStatus upperStatus = fs.getFileStatus(upper);
  assertExists("Original upper case file" + upper, upper);
  // Case sensitivity: the lower-case twin must not appear.
  assertPathDoesNotExist("lower case file", lower);
  // Creating the lower-case file must leave the upper-case one untouched.
  out = fs.create(lower);
  out.writeUTF("l");
  out.close();
  assertExists("lower case file", lower);
  assertExists("Original upper case file " + upper, upper);
  FileStatus newStatus = fs.getFileStatus(upper);
  assertEquals("Expected status:" + upperStatus + " actual status " + newStatus, upperStatus.getLen(), newStatus.getLen());
}
APIUtilityVerifier EqualityVerifier
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testListLevelTestHadoop() throws Exception {
  createTestSubdirs();
  // List /test/hadoop and verify the three expected children, in order.
  FileStatus[] paths = fs.listStatus(path("/test/hadoop"));
  String stats = dumpStats("/test/hadoop", paths);
  assertEquals("Paths.length wrong in " + stats, 3, paths.length);
  assertEquals("Path element[0] wrong: " + stats, path("/test/hadoop/a"), paths[0].getPath());
  assertEquals("Path element[1] wrong: " + stats, path("/test/hadoop/b"), paths[1].getPath());
  assertEquals("Path element[2] wrong: " + stats, path("/test/hadoop/c"), paths[2].getPath());
}
APIUtilityVerifier EqualityVerifier
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testListNonEmptyRoot() throws Throwable {
  // Put a single entry under the root.
  Path test = path("/test");
  touch(fs, test);
  // The root listing must contain exactly that entry.
  FileStatus[] rootListing = fs.listStatus(path("/"));
  String stats = dumpStats("/", rootListing);
  assertEquals("Wrong #of root children" + stats, 1, rootListing.length);
  assertEquals("Wrong path value" + stats, test, rootListing[0].getPath());
}
APIUtilityVerifier EqualityVerifier
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testListStatusFile() throws Exception {
  describe("Create a single file under /test;" + " assert that listStatus(/test) finds it");
  Path file = path("/test/filename");
  createFile(file);
  // listStatus() on a plain file returns that file's own status entry.
  FileStatus[] pathStats = fs.listStatus(file);
  assertEquals(dumpStats("/test/", pathStats), 1, pathStats.length);
  FileStatus lsStat = pathStats[0];
  assertEquals("Wrong file len in listing of " + lsStat, data.length, lsStat.getLen());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * tests functionality for big files ( > 5Gb) upload
 */
@Test(timeout=SWIFT_BULK_IO_TEST_TIMEOUT) public void testFilePartUpload() throws Throwable {
  final Path path = new Path("/test/testFilePartUpload");
  int len = 8192;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path, false, getBufferSize(), (short) 1, BLOCK_SIZE);
  try {
    // No partitions before anything is written.
    // (An unused totalPartitionsToWrite local was removed here.)
    assertPartitionsWritten("Startup", out, 0);
    // First write: a fraction of the dataset.
    int firstWriteLen = 2048;
    out.write(src, 0, firstWriteLen);
    long expected = getExpectedPartitionsWritten(firstWriteLen, PART_SIZE_BYTES, false);
    SwiftUtils.debug(LOG, "First write: predict %d partitions written", expected);
    assertPartitionsWritten("First write completed", out, expected);
    // Write the remainder of the dataset.
    int remainder = len - firstWriteLen;
    SwiftUtils.debug(LOG, "remainder: writing: %d bytes", remainder);
    out.write(src, firstWriteLen, remainder);
    expected = getExpectedPartitionsWritten(len, PART_SIZE_BYTES, false);
    assertPartitionsWritten("Remaining data", out, expected);
    // Closing flushes the final (possibly short) partition.
    out.close();
    expected = getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
    assertPartitionsWritten("Stream closed", out, expected);
    // Log object headers for diagnostics.
    Header[] headers = fs.getStore().getObjectHeaders(path, true);
    for (Header header : headers) {
      LOG.info(header.toString());
    }
    // The reassembled object must match what was written.
    byte[] dest = readDataset(fs, path, len);
    LOG.info("Read dataset from " + path + ": data length =" + len);
    SwiftTestUtils.compareByteArrays(src, dest, len);
    // Block locations must be available for the uploaded file.
    final Path qualifiedPath = path.makeQualified(fs);
    FileStatus status = fs.getFileStatus(qualifiedPath);
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
    assertNotNull("Null getFileBlockLocations()", locations);
    assertTrue("empty array returned for getFileBlockLocations()", locations.length > 0);
    // A length mismatch here is treated as an environment assumption
    // failure (skip) rather than a test failure.
    try {
      validatePathLen(path, len);
    } catch (AssertionError e) {
      throw new AssumptionViolatedException(e, null);
    }
  } finally {
    IOUtils.closeStream(out);
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * Test that when a partitioned file is overwritten by a smaller one,
 * all the old partitioned files go away
 * @throws Throwable
 */
@Test(timeout=SWIFT_BULK_IO_TEST_TIMEOUT) public void testOverwritePartitionedFile() throws Throwable {
final Path path=new Path("/test/testOverwritePartitionedFile");
// First upload: 8KB with a 1KB block size, forcing multiple partitions.
final int len1=8192;
final byte[] src1=SwiftTestUtils.dataset(len1,'A','Z');
FSDataOutputStream out=fs.create(path,false,getBufferSize(),(short)1,1024);
out.write(src1,0,len1);
out.close();
// NOTE(review): the stream is already closed here, yet the expected count
// is computed with closed=false — other tests in this file pass closed=true
// after close(); confirm this is intentional.
long expected=getExpectedPartitionsWritten(len1,PART_SIZE_BYTES,false);
assertPartitionsWritten("initial upload",out,expected);
assertExists("Exists",path);
FileStatus status=fs.getFileStatus(path);
assertEquals("Length",len1,status.getLen());
// Overwrite with a smaller dataset.
final int len2=4095;
final byte[] src2=SwiftTestUtils.dataset(len2,'a','z');
out=fs.create(path,true,getBufferSize(),(short)1,1024);
out.write(src2,0,len2);
out.close();
// The status must reflect the new, smaller length...
status=fs.getFileStatus(path);
assertEquals("Length",len2,status.getLen());
// ...and the content must be exactly the second dataset.
byte[] dest=readDataset(fs,path,len2);
SwiftTestUtils.compareByteArrays(src2,dest,len2);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test sticks up a very large partitioned file and verifies that
 * it comes back unchanged.
 * @throws Throwable
 */
@Test(timeout=SWIFT_BULK_IO_TEST_TIMEOUT) public void testManyPartitionedFile() throws Throwable {
final Path path=new Path("/test/testManyPartitionedFile");
// 15 full partitions' worth of data.
int len=PART_SIZE_BYTES * 15;
final byte[] src=SwiftTestUtils.dataset(len,32,144);
FSDataOutputStream out=fs.create(path,false,getBufferSize(),(short)1,BLOCK_SIZE);
out.write(src,0,src.length);
int expected=getExpectedPartitionsWritten(len,PART_SIZE_BYTES,true);
out.close();
// After close(), all partitions and all bytes must have been uploaded.
assertPartitionsWritten("write completed",out,expected);
assertEquals("too few bytes written",len,SwiftNativeFileSystem.getBytesWritten(out));
assertEquals("too few bytes uploaded",len,SwiftNativeFileSystem.getBytesUploaded(out));
// Read-back must match the source byte-for-byte.
byte[] dest=readDataset(fs,path,len);
SwiftTestUtils.compareByteArrays(src,dest,len);
// Listing the path shows one entry per uploaded partition.
FileStatus[] stats=fs.listStatus(path);
assertEquals("wrong entry count in " + SwiftTestUtils.dumpStats(path.toString(),stats),expected,stats.length);
}
APIUtilityVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * tests functionality for big files ( > 5Gb) upload
 */
@Test(timeout=SWIFT_BULK_IO_TEST_TIMEOUT) public void testFilePartUploadNoLengthCheck() throws IOException, URISyntaxException {
  // NOTE(review): the path says "...LengthCheck" while the method is named
  // "...NoLengthCheck" — confirm which name is intended.
  final Path path = new Path("/test/testFilePartUploadLengthCheck");
  int len = 8192;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path, false, getBufferSize(), (short) 1, BLOCK_SIZE);
  try {
    // No partitions before anything is written.
    // (An unused totalPartitionsToWrite local was removed here.)
    assertPartitionsWritten("Startup", out, 0);
    // First write: a fraction of the dataset.
    int firstWriteLen = 2048;
    out.write(src, 0, firstWriteLen);
    long expected = getExpectedPartitionsWritten(firstWriteLen, PART_SIZE_BYTES, false);
    SwiftUtils.debug(LOG, "First write: predict %d partitions written", expected);
    assertPartitionsWritten("First write completed", out, expected);
    // Write the remainder of the dataset.
    int remainder = len - firstWriteLen;
    SwiftUtils.debug(LOG, "remainder: writing: %d bytes", remainder);
    out.write(src, firstWriteLen, remainder);
    expected = getExpectedPartitionsWritten(len, PART_SIZE_BYTES, false);
    assertPartitionsWritten("Remaining data", out, expected);
    // Closing flushes the final (possibly short) partition.
    out.close();
    expected = getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
    assertPartitionsWritten("Stream closed", out, expected);
    // Log object headers for diagnostics.
    Header[] headers = fs.getStore().getObjectHeaders(path, true);
    for (Header header : headers) {
      LOG.info(header.toString());
    }
    // Read-back must match the source; unlike testFilePartUpload there is
    // no validatePathLen() step here.
    byte[] dest = readDataset(fs, path, len);
    LOG.info("Read dataset from " + path + ": data length =" + len);
    SwiftTestUtils.compareByteArrays(src, dest, len);
    FileStatus status = fs.getFileStatus(path);
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
    assertNotNull("Null getFileBlockLocations()", locations);
    assertTrue("empty array returned for getFileBlockLocations()", locations.length > 0);
  } finally {
    IOUtils.closeStream(out);
  }
}
APIUtilityVerifier EqualityVerifier
@Test(timeout=SWIFT_BULK_IO_TEST_TIMEOUT) public void testRenamePartitionedFile() throws Throwable {
  Path src = new Path("/test/testRenamePartitionedFileSrc");
  int len = data.length;
  SwiftTestUtils.writeDataset(fs, src, data, len, 1024, false);
  assertExists("Exists", src);
  // Locate one of the partition objects backing the file.
  String partOneName = SwiftUtils.partitionFilenameFromNumber(1);
  Path srcPart = new Path(src, partOneName);
  Path dest = new Path("/test/testRenamePartitionedFileDest");
  // (The original also built a "destPart" path — from src rather than dest,
  //  a copy/paste slip — but never used it; removed along with an unused
  //  dest-side ls string.)
  assertExists("Partition Exists", srcPart);
  fs.rename(src, dest);
  assertPathExists(fs, "dest file missing", dest);
  FileStatus status = fs.getFileStatus(dest);
  assertEquals("Length of renamed file is wrong", len, status.getLen());
  // The content must survive the rename intact.
  byte[] destData = readDataset(fs, dest, len);
  SwiftTestUtils.compareByteArrays(data, destData, len);
  // Neither the source file nor its partition objects may remain.
  String srcLs = SwiftTestUtils.ls(fs, src);
  assertPathDoesNotExist("deleted file still found in " + srcLs, src);
  assertPathDoesNotExist("partition file still found in " + srcLs, srcPart);
}
APIUtilityVerifier EqualityVerifier
/**
 * Read and write some JSON
 * @throws IOException
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testRWJson() throws IOException {
final String message="{" + " 'json': { 'i':43, 'b':true}," + " 's':'string'"+ "}";
final Path filePath=new Path("/test/file.json");
writeTextFile(fs,filePath,message,false);
// Round-trip: the JSON text must come back unchanged.
String readJson=readBytesToString(fs,filePath,message.length());
assertEquals(message,readJson);
// Smoke-test getFileBlockLocations(); the result is intentionally unused —
// the call must simply not throw.
FileStatus status=fs.getFileStatus(filePath);
BlockLocation[] locations=fs.getFileBlockLocations(status,0,10);
}
APIUtilityVerifier EqualityVerifier
/**
 * Round-trip a small XML payload through the filesystem.
 * NOTE(review): the message literal appears to have had its XML tags
 * stripped in this copy of the file — preserved exactly as found.
 * @throws IOException on any IO failure
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testRWXML() throws IOException {
final String message="" + " " + " string"+ " ";
final Path xmlPath=new Path("/test/file.xml");
writeTextFile(fs,xmlPath,message,false);
// read back exactly message.length() characters and compare
final String roundTripped=readBytesToString(fs,xmlPath,message.length());
assertEquals(message,roundTripped);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Create a file, rename it, then open the renamed file and check the
 * data read back matches what was written.
 * @throws Exception on any failure
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testRenameFile() throws Exception {
assumeRenameSupported();
final Path srcFile=new Path("/test/alice/file");
final Path destFile=new Path("/test/bob/file");
fs.mkdirs(destFile.getParent());
final byte[] payload="Some data".getBytes();
final FSDataOutputStream out=fs.create(srcFile);
out.write(payload);
out.close();
assertTrue(fs.exists(srcFile));
rename(srcFile,destFile,true,false,true);
// read the renamed file back into an oversized buffer
final FSDataInputStream in=fs.open(destFile);
final byte[] readBuffer=new byte[512];
final int bytesRead=in.read(readBuffer);
in.close();
// trim to the number of bytes actually read before comparing
final byte[] actual=new byte[bytesRead];
System.arraycopy(readBuffer,0,actual,0,bytesRead);
assertEquals(new String(payload),new String(actual));
}
APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Rename a file into a directory
 * @throws Exception
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testRenameFileIntoExistingDirectory() throws Exception {
assumeRenameSupported();
Path srcFile=path("/test/olddir/file");
createFile(srcFile);
Path destDir=path("/test/new/newdir");
fs.mkdirs(destDir);
rename(srcFile,destDir,true,false,true);
// renaming a file into a directory should move it under that directory
Path relocated=path("/test/new/newdir/file");
if (!fs.exists(relocated)) {
// dump relevant directory listings to help diagnose the failure
String listing=ls(destDir);
LOG.info(ls(path("/test/new")));
LOG.info(ls(path("/test/hadoop")));
fail("did not find " + relocated + " - directory: "+ listing);
}
assertTrue("Destination changed",fs.exists(path("/test/new/newdir/file")));
}
APIUtilityVerifier EqualityVerifier
/**
 * A swift:// URL parsed via fromPath must equal the object path built
 * directly from the extracted container name and the object key.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testParseUrlPath() throws Exception {
final String source="swift://container.service1/home/user/files/file1";
final URI sourceUri=new URI(source);
final Path sourcePath=new Path(source);
final SwiftObjectPath parsed=SwiftObjectPath.fromPath(sourceUri,sourcePath);
final SwiftObjectPath built=new SwiftObjectPath(RestClientBindings.extractContainerName(sourceUri),"/home/user/files/file1");
assertEquals(parsed,built);
}
APIUtilityVerifier EqualityVerifier
/**
 * A plain path combined with a container URI must parse to the same
 * object path as one constructed directly from its components.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testParsePath() throws Exception {
final String rawPath="/home/user/files/file1";
final Path hadoopPath=new Path(rawPath);
final URI containerUri=new URI("http://container.localhost");
final SwiftObjectPath parsed=SwiftObjectPath.fromPath(containerUri,hadoopPath);
final SwiftObjectPath built=new SwiftObjectPath(RestClientBindings.extractContainerName(containerUri),rawPath);
assertEquals(parsed,built);
}
APIUtilityVerifier EqualityVerifier
/**
 * A swift URL containing a keystone auth prefix (/v2/AUTH_...) must
 * parse to the object path without the auth segment.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testParseAuthenticatedUrl() throws Exception {
final String source="swift://container.service1/v2/AUTH_00345h34l93459y4/home/tom/documents/finance.docx";
final URI sourceUri=new URI(source);
final Path sourcePath=new Path(source);
final SwiftObjectPath parsed=SwiftObjectPath.fromPath(sourceUri,sourcePath);
// the /v2/AUTH_* prefix must have been stripped from the object key
final SwiftObjectPath built=new SwiftObjectPath(RestClientBindings.extractContainerName(sourceUri),"/home/tom/documents/finance.docx");
assertEquals(parsed,built);
}
APIUtilityVerifier UtilityVerifier
/**
 * Upload a one-byte object, HEAD it (timing the request), delete it,
 * and verify a subsequent HEAD fails with FileNotFoundException.
 * @throws Throwable on any failure
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testPutAndDelete() throws Throwable {
assumeEnabled();
SwiftRestClient client=createClient();
client.authenticate();
Path path=new Path("restTestPutAndDelete");
SwiftObjectPath sobject=SwiftObjectPath.fromPath(serviceURI,path);
byte[] stuff=new byte[1];
stuff[0]='a';
client.upload(sobject,new ByteArrayInputStream(stuff),stuff.length);
// time the HEAD request and log the response headers
Duration head=new Duration();
Header[] responseHeaders=client.headRequest("expect success",sobject,SwiftRestClient.NEWEST);
head.finished();
LOG.info("head request duration " + head);
for ( Header header : responseHeaders) {
LOG.info(header.toString());
}
client.delete(sobject);
try {
// return value deliberately discarded: reaching the next line at all
// means the HEAD succeeded, which is the failure condition here
client.headRequest("expect fail",sobject,SwiftRestClient.NEWEST);
Assert.fail("Expected deleted file, but object is still present: " + sobject);
}
catch ( FileNotFoundException e) {
// expected: the object was deleted, so the HEAD must fail
}
for ( DurationStats stats : client.getOperationStatistics()) {
LOG.info(stats);
}
}
APIUtilityVerifier IterativeVerifier EqualityVerifier
/**
 * Scale test: write, list, read back, and delete a directory of many
 * small files, collecting per-phase duration statistics which are
 * logged at the end.
 * @throws Throwable on any failure
 */
@Test(timeout=SWIFT_BULK_IO_TEST_TIMEOUT) public void testScaledWriteThenRead() throws Throwable {
Path dir=new Path("/test/manysmallfiles");
Duration rm1=new Duration();
fs.delete(dir,true);
rm1.finished();
fs.mkdirs(dir);
Duration ls1=new Duration();
fs.listStatus(dir);
ls1.finished();
long count=getOperationCount();
SwiftTestUtils.noteAction("Beginning Write of " + count + " files ");
DurationStats writeStats=new DurationStats("write");
DurationStats readStats=new DurationStats("read");
String format="%08d";
for (long l=0; l < count; l++) {
String name=String.format(format,l);
Path p=new Path(dir,"part-" + name);
Duration d=new Duration();
SwiftTestUtils.writeTextFile(fs,p,name,false);
d.finished();
writeStats.add(d);
// pause between writes; presumably to keep the object store's
// eventual consistency from affecting the later listing — TODO confirm
Thread.sleep(1000);
}
SwiftTestUtils.noteAction("Beginning ls");
Duration ls2=new Duration();
// listStatus already returns FileStatus[]; the previous redundant cast removed
FileStatus[] status2=fs.listStatus(dir);
ls2.finished();
assertEquals("Not enough entries in the directory",count,status2.length);
SwiftTestUtils.noteAction("Beginning read");
for (long l=0; l < count; l++) {
String name=String.format(format,l);
Path p=new Path(dir,"part-" + name);
Duration d=new Duration();
// each file's contents are its own zero-padded name
String result=SwiftTestUtils.readBytesToString(fs,p,name.length());
assertEquals(name,result);
d.finished();
readStats.add(d);
}
SwiftTestUtils.noteAction("Beginning delete");
Duration rm2=new Duration();
fs.delete(dir,true);
rm2.finished();
// log all collected statistics in a loosely CSV-ish form
LOG.info(String.format("'filesystem','%s'",fs.getUri()));
LOG.info(writeStats.toString());
LOG.info(readStats.toString());
LOG.info(String.format("'rm1',%d,'ls1',%d",rm1.value(),ls1.value()));
LOG.info(String.format("'rm2',%d,'ls2',%d",rm2.value(),ls2.value()));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verify listing through the chrooted filesystem: the root is a directory
 * mapped to the chroot target, starts empty, and after creating two files
 * and two directories the listing contains exactly those four entries
 * (a nested directory must not appear at the top level).
 */
@Test public void testList() throws IOException {
FileStatus rootStatus=fSys.getFileStatus(new Path("/"));
Assert.assertTrue(rootStatus.isDirectory());
// "/" in the chrooted view resolves to the chroot target path
Assert.assertEquals(rootStatus.getPath(),chrootedTo);
FileStatus[] listing=fSys.listStatus(new Path("/"));
Assert.assertEquals(0,listing.length);
// populate: two files, two top-level dirs, plus one nested dir
fileSystemTestHelper.createFile(fSys,"/foo");
fileSystemTestHelper.createFile(fSys,"/bar");
fSys.mkdirs(new Path("/dirX"));
fSys.mkdirs(fileSystemTestHelper.getTestRootPath(fSys,"/dirY"));
fSys.mkdirs(new Path("/dirX/dirXX"));
listing=fSys.listStatus(new Path("/"));
Assert.assertEquals(4,listing.length);
FileStatus entry=FileSystemTestHelper.containsPath(new Path(chrootedTo,"foo"),listing);
Assert.assertNotNull(entry);
Assert.assertTrue(entry.isFile());
entry=FileSystemTestHelper.containsPath(new Path(chrootedTo,"bar"),listing);
Assert.assertNotNull(entry);
Assert.assertTrue(entry.isFile());
entry=FileSystemTestHelper.containsPath(new Path(chrootedTo,"dirX"),listing);
Assert.assertNotNull(entry);
Assert.assertTrue(entry.isDirectory());
entry=FileSystemTestHelper.containsPath(new Path(chrootedTo,"dirY"),listing);
Assert.assertNotNull(entry);
Assert.assertTrue(entry.isDirectory());
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * Regression test for HADOOP-8408.
 */
@Test public void testGetCanonicalServiceNameWithNonDefaultMountTable() throws URISyntaxException, IOException {
// build a view over a named (non-default) mount table with one link
Configuration viewConf=new Configuration();
ConfigUtil.addLink(viewConf,MOUNT_TABLE_NAME,"/user",new URI("file:///"));
FileSystem viewFileSystem=FileSystem.get(new URI(FsConstants.VIEWFS_SCHEME + "://" + MOUNT_TABLE_NAME),viewConf);
// a viewfs has no canonical service name of its own
assertNull(viewFileSystem.getCanonicalServiceName());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testAddDelegationTokens() throws Exception {
Credentials creds=new Credentials();
Token> fs1Tokens[]=addTokensWithCreds(fs1,creds);
assertEquals(1,fs1Tokens.length);
assertEquals(1,creds.numberOfTokens());
Token> fs2Tokens[]=addTokensWithCreds(fs2,creds);
assertEquals(1,fs2Tokens.length);
assertEquals(2,creds.numberOfTokens());
Credentials savedCreds=creds;
creds=new Credentials();
Token> viewFsTokens[]=viewFs.addDelegationTokens("me",creds);
assertEquals(2,viewFsTokens.length);
assertTrue(creds.getAllTokens().containsAll(savedCreds.getAllTokens()));
assertEquals(savedCreds.numberOfTokens(),creds.numberOfTokens());
viewFsTokens=viewFs.addDelegationTokens("me",creds);
assertEquals(0,viewFsTokens.length);
assertTrue(creds.getAllTokens().containsAll(savedCreds.getAllTokens()));
assertEquals(savedCreds.numberOfTokens(),creds.numberOfTokens());
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * As the non-default-mount-table case, but using the default mount table:
 * the viewfs must report no canonical service name.
 */
@Test public void testGetCanonicalServiceNameWithDefaultMountTable() throws URISyntaxException, IOException {
Configuration viewConf=new Configuration();
ConfigUtil.addLink(viewConf,"/user",new URI("file:///"));
FileSystem viewFileSystem=FileSystem.get(FsConstants.VIEWFS_URI,viewConf);
assertNull(viewFileSystem.getCanonicalServiceName());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Write a file under a viewfs mount, fetch its FileStatus, serialize it
 * through Writable (write/readFields), and verify the deserialized copy
 * reports the same length.
 */
@Test public void testFileStatusSerialziation() throws IOException, URISyntaxException {
String testfilename="testFileStatusSerialziation";
TEST_DIR.mkdirs();
File infile=new File(TEST_DIR,testfilename);
final byte[] content="dingos".getBytes();
// try-with-resources replaces the manual try/finally close dance
try (FileOutputStream fos=new FileOutputStream(infile)) {
fos.write(content);
}
assertEquals((long)content.length,infile.length());
Configuration conf=new Configuration();
ConfigUtil.addLink(conf,"/foo/bar/baz",TEST_DIR.toURI());
FileSystem vfs=FileSystem.get(FsConstants.VIEWFS_URI,conf);
assertEquals(ViewFileSystem.class,vfs.getClass());
FileStatus stat=vfs.getFileStatus(new Path("/foo/bar/baz",testfilename));
assertEquals(content.length,stat.getLen());
// round-trip the status through the Writable serialization buffers
DataOutputBuffer dob=new DataOutputBuffer();
stat.write(dob);
DataInputBuffer dib=new DataInputBuffer();
dib.reset(dob.getData(),0,dob.getLength());
FileStatus deSer=new FileStatus();
deSer.readFields(dib);
assertEquals(content.length,deSer.getLen());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that automatic failover won't run against a target that hasn't
 * explicitly enabled the feature.
 */
@Test(timeout=10000) public void testWontRunWhenAutoFailoverDisabled() throws Exception {
DummyHAService svc=cluster.getService(1);
// spy so the auto-failover flag can be forced off without touching config
svc=Mockito.spy(svc);
Mockito.doReturn(false).when(svc).isAutoFailoverEnabled();
// both formatting ZK and a plain run must refuse with the dedicated error code
assertEquals(ZKFailoverController.ERR_CODE_AUTO_FAILOVER_NOT_ENABLED,runFC(svc,"-formatZK"));
assertEquals(ZKFailoverController.ERR_CODE_AUTO_FAILOVER_NOT_ENABLED,runFC(svc));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verify that abandoning the last block of an open file removes exactly one
// block, that a repeated abandonBlock call is harmless (idempotent), and
// that the result survives a namenode restart.
@Test public void testAbandonBlock() throws IOException {
String src=FILE_NAME_PREFIX + "foo";
// create the file and write enough data to allocate blocks (512-byte blocks)
FSDataOutputStream fout=fs.create(new Path(src),true,4096,(short)1,512L);
for (int i=0; i < 1024; i++) {
fout.write(123);
}
fout.hflush();
long fileId=((DFSOutputStream)fout.getWrappedStream()).getFileId();
DFSClient dfsclient=DFSClientAdapter.getDFSClient(fs);
LocatedBlocks blocks=dfsclient.getNamenode().getBlockLocations(src,0,Integer.MAX_VALUE);
int orginalNumBlocks=blocks.locatedBlockCount();
LocatedBlock b=blocks.getLastLocatedBlock();
// abandon the last block twice: the second call must not fail or remove more
dfsclient.getNamenode().abandonBlock(b.getBlock(),fileId,src,dfsclient.clientName);
dfsclient.getNamenode().abandonBlock(b.getBlock(),fileId,src,dfsclient.clientName);
fout.close();
// restart to force the namenode to replay the abandon from its edit log
cluster.restartNameNode();
blocks=dfsclient.getNamenode().getBlockLocations(src,0,Integer.MAX_VALUE);
// exactly one block should be gone despite the double abandon call
Assert.assertEquals("Blocks " + b + " has not been abandoned.",orginalNumBlocks,blocks.locatedBlockCount() + 1);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier
/**
 * Test shutting down the ShortCircuitCache while there are things in it.
 */
@Test public void testShortCircuitCacheShutdown() throws Exception {
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
Configuration conf=createShortCircuitConf("testShortCircuitCacheShutdown",sockDir);
conf.set(DFS_CLIENT_CONTEXT,"testShortCircuitCacheShutdown");
Configuration serverConf=new Configuration(conf);
DFSInputStream.tcpReadsDisabledForTesting=true;
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(0),conf);
final String TEST_FILE="/test_file";
final int TEST_FILE_LEN=4000;
final int SEED=0xFADEC;
DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
// short-circuit read the file and check the contents, populating the cache
byte contents[]=DFSTestUtil.readFileBuffer(fs,new Path(TEST_FILE));
byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents,expected));
final ShortCircuitCache cache=fs.dfs.getClientContext().getShortCircuitCache();
// closing the cache with live entries must also shut down its watcher
cache.close();
Assert.assertTrue(cache.getDfsClientShmManager().getDomainSocketWatcher().isClosed());
cluster.shutdown();
// BUG FIX: the socket directory was leaked; sibling tests close it
sockDir.close();
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * When an InterruptedException is sent to a thread calling
 * FileChannel#read, the FileChannel is immediately closed and the
 * thread gets an exception. This effectively means that we might have
 * someone asynchronously calling close() on the file descriptors we use
 * in BlockReaderLocal. So when unreferencing a ShortCircuitReplica in
 * ShortCircuitCache#unref, we should check if the FileChannel objects
 * are still open. If not, we should purge the replica to avoid giving
 * it out to any future readers.
 * This is a regression test for HDFS-6227: Short circuit read failed
 * due to ClosedChannelException.
 * Note that you may still get ClosedChannelException errors if two threads
 * are reading from the same replica and an InterruptedException is delivered
 * to one of them.
 */
@Test(timeout=120000) public void testPurgingClosedReplicas() throws Exception {
BlockReaderTestUtil.enableBlockReaderFactoryTracing();
// counts every fall-through to real replica creation; the final assertion
// relies on this to prove the closed replica was purged and re-created
final AtomicInteger replicasCreated=new AtomicInteger(0);
final AtomicBoolean testFailed=new AtomicBoolean(false);
DFSInputStream.tcpReadsDisabledForTesting=true;
// hook invoked whenever the factory needs a new short-circuit replica
BlockReaderFactory.createShortCircuitReplicaInfoCallback=new ShortCircuitCache.ShortCircuitReplicaCreator(){
@Override public ShortCircuitReplicaInfo createShortCircuitReplicaInfo(){
replicasCreated.incrementAndGet();
return null;
}
}
;
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
Configuration conf=createShortCircuitConf("testPurgingClosedReplicas",sockDir);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem dfs=cluster.getFileSystem();
final String TEST_FILE="/test_file";
final int TEST_FILE_LEN=4095;
final int SEED=0xFADE0;
final DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(0),conf);
DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
// semaphore used to hand control back and forth between the reader thread
// and the interrupting main thread
final Semaphore sem=new Semaphore(0);
final List locatedBlocks=cluster.getNameNode().getRpcServer().getBlockLocations(TEST_FILE,0,TEST_FILE_LEN).getLocatedBlocks();
final LocatedBlock lblock=locatedBlocks.get(0);
final byte[] buf=new byte[TEST_FILE_LEN];
// reader loop: repeatedly opens a block reader, signals the main thread,
// reads, and exits only once the interrupt closes the channel
Runnable readerRunnable=new Runnable(){
@Override public void run(){
try {
while (true) {
BlockReader blockReader=null;
try {
blockReader=BlockReaderTestUtil.getBlockReader(cluster,lblock,0,TEST_FILE_LEN);
sem.release();
try {
blockReader.readAll(buf,0,TEST_FILE_LEN);
}
finally {
sem.acquireUninterruptibly();
}
}
catch ( ClosedByInterruptException e) {
// this is the intended exit path: the interrupt closed the channel
LOG.info("got the expected ClosedByInterruptException",e);
sem.release();
break;
}
finally {
if (blockReader != null) blockReader.close();
}
LOG.info("read another " + TEST_FILE_LEN + " bytes.");
}
}
catch ( Throwable t) {
// any other failure is recorded and checked by the main thread below
LOG.error("getBlockReader failure",t);
testFailed.set(true);
sem.release();
}
}
}
;
Thread thread=new Thread(readerRunnable);
thread.start();
// keep interrupting the reader until it observes the closed channel and exits
while (thread.isAlive()) {
sem.acquireUninterruptibly();
thread.interrupt();
sem.release();
}
Assert.assertFalse(testFailed.get());
// a fresh read must succeed: the purged replica forces a new one
BlockReader blockReader=null;
try {
blockReader=BlockReaderTestUtil.getBlockReader(cluster,lblock,0,TEST_FILE_LEN);
blockReader.readFully(buf,0,TEST_FILE_LEN);
}
finally {
if (blockReader != null) blockReader.close();
}
byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(buf,expected));
// exactly two creations: the original replica plus its post-purge replacement
Assert.assertEquals(2,replicasCreated.get());
dfs.close();
cluster.shutdown();
sockDir.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * If we have a UNIX domain socket configured,
 * and we have dfs.client.domain.socket.data.traffic set to true,
 * and short-circuit access fails, we should still be able to pass
 * data traffic over the UNIX domain socket. Test this.
 */
@Test(timeout=60000) public void testFallbackFromShortCircuitToUnixDomainTraffic() throws Exception {
DFSInputStream.tcpReadsDisabledForTesting=true;
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
// client: short-circuit configured, with domain-socket data traffic enabled
Configuration clientConf=createShortCircuitConf("testFallbackFromShortCircuitToUnixDomainTraffic",sockDir);
clientConf.set(DFS_CLIENT_CONTEXT,"testFallbackFromShortCircuitToUnixDomainTraffic_clientContext");
clientConf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,true);
// server: short-circuit explicitly disabled, forcing the fallback path
Configuration serverConf=new Configuration(clientConf);
serverConf.setBoolean(DFS_CLIENT_READ_SHORTCIRCUIT_KEY,false);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
cluster.waitActive();
FileSystem dfs=FileSystem.get(cluster.getURI(0),clientConf);
String TEST_FILE="/test_file";
final int TEST_FILE_LEN=8193;
final int SEED=0xFADED;
DFSTestUtil.createFile(dfs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
// the read must still succeed — over the domain socket, not short-circuit
byte actualContents[]=DFSTestUtil.readFileBuffer(dfs,new Path(TEST_FILE));
byte expectedContents[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(actualContents,expectedContents));
cluster.shutdown();
sockDir.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that a client which supports short-circuit reads using
 * shared memory can fall back to not using shared memory when
 * the server doesn't support it.
 */
@Test public void testShortCircuitReadFromServerWithoutShm() throws Exception {
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
Configuration clientConf=createShortCircuitConf("testShortCircuitReadFromServerWithoutShm",sockDir);
Configuration serverConf=new Configuration(clientConf);
// interrupt-check interval 0 disables shared memory on the server side
serverConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,0);
DFSInputStream.tcpReadsDisabledForTesting=true;
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
cluster.waitActive();
clientConf.set(DFS_CLIENT_CONTEXT,"testShortCircuitReadFromServerWithoutShm_clientContext");
final DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(0),clientConf);
final String TEST_FILE="/test_file";
final int TEST_FILE_LEN=4000;
final int SEED=0xFADEC;
DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
byte contents[]=DFSTestUtil.readFileBuffer(fs,new Path(TEST_FILE));
byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents,expected));
final ShortCircuitCache cache=fs.dfs.getClientContext().getShortCircuitCache();
final DatanodeInfo datanode=new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
// NOTE(review): the visit parameter's type arguments were lost in this copy
// of the file (raw HashMap); restored to the shm-manager visitor signature.
cache.getDfsClientShmManager().visit(new Visitor(){
@Override public void visit( HashMap<DatanodeInfo,PerDatanodeVisitorInfo> info) throws IOException {
// exactly one datanode, marked as having shm disabled, with no segments
Assert.assertEquals(1,info.size());
PerDatanodeVisitorInfo vinfo=info.get(datanode);
Assert.assertTrue(vinfo.disabled);
Assert.assertEquals(0,vinfo.full.size());
Assert.assertEquals(0,vinfo.notFull.size());
}
}
);
cluster.shutdown();
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test the case where we have multiple threads waiting on the
 * ShortCircuitCache delivering a certain ShortCircuitReplica.
 * In this case, there should only be one call to
 * createShortCircuitReplicaInfo. This one replica should be shared
 * by all threads.
 */
@Test(timeout=60000) public void testMultipleWaitersOnShortCircuitCache() throws Exception {
// latch blocks replica creation until all reader threads are waiting
final CountDownLatch latch=new CountDownLatch(1);
// flips to false on the first creation; a second creation fails the test
final AtomicBoolean creationIsBlocked=new AtomicBoolean(true);
final AtomicBoolean testFailed=new AtomicBoolean(false);
DFSInputStream.tcpReadsDisabledForTesting=true;
BlockReaderFactory.createShortCircuitReplicaInfoCallback=new ShortCircuitCache.ShortCircuitReplicaCreator(){
@Override public ShortCircuitReplicaInfo createShortCircuitReplicaInfo(){
Uninterruptibles.awaitUninterruptibly(latch);
if (!creationIsBlocked.compareAndSet(true,false)) {
Assert.fail("there were multiple calls to " + "createShortCircuitReplicaInfo. Only one was expected.");
}
return null;
}
}
;
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
Configuration conf=createShortCircuitConf("testMultipleWaitersOnShortCircuitCache",sockDir);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem dfs=cluster.getFileSystem();
final String TEST_FILE="/test_file";
final int TEST_FILE_LEN=4000;
final int SEED=0xFADED;
final int NUM_THREADS=10;
DFSTestUtil.createFile(dfs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
// each reader reads the whole file and verifies it; failures are recorded
// in testFailed rather than thrown, so the main thread can assert on them
Runnable readerRunnable=new Runnable(){
@Override public void run(){
try {
byte contents[]=DFSTestUtil.readFileBuffer(dfs,new Path(TEST_FILE));
Assert.assertFalse(creationIsBlocked.get());
byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents,expected));
}
catch ( Throwable e) {
LOG.error("readerRunnable error",e);
testFailed.set(true);
}
}
}
;
Thread threads[]=new Thread[NUM_THREADS];
for (int i=0; i < NUM_THREADS; i++) {
threads[i]=new Thread(readerRunnable);
threads[i].start();
}
// give all threads time to block on the cache, then release creation
Thread.sleep(500);
latch.countDown();
for (int i=0; i < NUM_THREADS; i++) {
Uninterruptibles.joinUninterruptibly(threads[i]);
}
cluster.shutdown();
sockDir.close();
Assert.assertFalse(testFailed.get());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test that a client which does not support short-circuit reads using
 * shared memory can talk with a server which supports it.
 */
@Test public void testShortCircuitReadFromClientWithoutShm() throws Exception {
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
Configuration clientConf=createShortCircuitConf("testShortCircuitReadWithoutShm",sockDir);
Configuration serverConf=new Configuration(clientConf);
DFSInputStream.tcpReadsDisabledForTesting=true;
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
cluster.waitActive();
// interrupt-check interval 0 disables shared memory on the client side
clientConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,0);
clientConf.set(DFS_CLIENT_CONTEXT,"testShortCircuitReadFromClientWithoutShm_clientContext");
final DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(0),clientConf);
final String TEST_FILE="/test_file";
final int TEST_FILE_LEN=4000;
final int SEED=0xFADEC;
DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
byte contents[]=DFSTestUtil.readFileBuffer(fs,new Path(TEST_FILE));
byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents,expected));
final ShortCircuitCache cache=fs.dfs.getClientContext().getShortCircuitCache();
// with shm disabled, no shm manager should have been created at all
Assert.assertNull(cache.getDfsClientShmManager());
cluster.shutdown();
// BUG FIX: the socket directory was leaked; sibling tests close it
sockDir.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that, in the case of an error, the position and limit of a ByteBuffer
 * are left unchanged. This is not mandated by ByteBufferReadable, but clients
 * of this class might immediately issue a retry on failure, so it's polite.
 */
@Test public void testStablePositionAfterCorruptRead() throws Exception {
final short REPL_FACTOR=1;
final long FILE_LENGTH=512L;
HdfsConfiguration conf=getConfiguration(null);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
Path path=new Path("/corrupted");
DFSTestUtil.createFile(fs,path,FILE_LENGTH,REPL_FACTOR,12345L);
DFSTestUtil.waitReplication(fs,path,REPL_FACTOR);
// corrupt every replica so the read is guaranteed to hit a ChecksumException
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,path);
int blockFilesCorrupted=cluster.corruptBlockOnDataNodes(block);
assertEquals("All replicas not corrupted",REPL_FACTOR,blockFilesCorrupted);
FSDataInputStream dis=cluster.getFileSystem().open(path);
ByteBuffer buf=ByteBuffer.allocateDirect((int)FILE_LENGTH);
boolean sawException=false;
try {
dis.read(buf);
}
catch ( ChecksumException ex) {
sawException=true;
}
// a fresh buffer must come back untouched: position 0, limit == capacity
assertTrue(sawException);
assertEquals(0,buf.position());
assertEquals(buf.capacity(),buf.limit());
// BUG FIX: close the first stream before reassigning (resource leak)
dis.close();
dis=cluster.getFileSystem().open(path);
// non-default position/limit must also survive a failed read unchanged
buf.position(3);
buf.limit(25);
sawException=false;
try {
dis.read(buf);
}
catch ( ChecksumException ex) {
sawException=true;
}
assertTrue(sawException);
assertEquals(3,buf.position());
assertEquals(25,buf.limit());
// BUG FIX: close the second stream before shutting down
dis.close();
cluster.shutdown();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify the per-datanode blocks-scheduled counter: it should be 1 while
 * a block write is in flight (after hflush) and drop back to 0 once the
 * output stream is closed.
 */
@Test public void testBlocksScheduledCounter() throws IOException {
MiniDFSCluster cluster=new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
FSDataOutputStream out=fs.create(new Path("/testBlockScheduledCounter"));
for (int i=0; i < 1024; i++) {
out.write(i);
}
out.hflush();
// NOTE(review): the type argument was lost in this copy of the file (raw
// ArrayList); restored so dnList.get(0) yields a DatanodeDescriptor.
ArrayList<DatanodeDescriptor> dnList=new ArrayList<DatanodeDescriptor>();
final DatanodeManager dm=cluster.getNamesystem().getBlockManager().getDatanodeManager();
dm.fetchDatanodes(dnList,dnList,false);
DatanodeDescriptor dn=dnList.get(0);
// one block is scheduled while the unflushed write is still open
assertEquals(1,dn.getBlocksScheduled());
out.close();
assertEquals(0,dn.getBlocksScheduled());
// BUG FIX: shut the cluster down so the test does not leak it
cluster.shutdown();
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Verify datanode address configuration: with the address keys unset a
 * restarted datanode binds to loopback, and with explicit 0.0.0.0
 * addresses it binds to the wildcard address.
 */
@Test public void testDFSAddressConfig() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
// NOTE(review): the type argument was lost in this copy of the file (raw
// ArrayList); restored so dns.get(0) yields a DataNode.
ArrayList<DataNode> dns=cluster.getDataNodes();
DataNode dn=dns.get(0);
String selfSocketAddr=dn.getXferAddress().toString();
System.out.println("DN Self Socket Addr == " + selfSocketAddr);
assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
// stop every datanode before restarting with a changed configuration
for (int i=0; i < dns.size(); i++) {
DataNodeProperties dnp=cluster.stopDataNode(i);
assertNotNull("Should have been able to stop simulated datanode",dnp);
}
// with the address keys unset, the datanode should still bind to loopback
conf.unset(DFS_DATANODE_ADDRESS_KEY);
conf.unset(DFS_DATANODE_HTTP_ADDRESS_KEY);
conf.unset(DFS_DATANODE_IPC_ADDRESS_KEY);
cluster.startDataNodes(conf,1,true,StartupOption.REGULAR,null,null,null,false,true);
dns=cluster.getDataNodes();
dn=dns.get(0);
selfSocketAddr=dn.getXferAddress().toString();
System.out.println("DN Self Socket Addr == " + selfSocketAddr);
assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
for (int i=0; i < dns.size(); i++) {
DataNodeProperties dnp=cluster.stopDataNode(i);
assertNotNull("Should have been able to stop simulated datanode",dnp);
}
// with explicit wildcard addresses, the datanode should bind to 0.0.0.0
conf.set(DFS_DATANODE_ADDRESS_KEY,"0.0.0.0:0");
conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY,"0.0.0.0:0");
conf.set(DFS_DATANODE_IPC_ADDRESS_KEY,"0.0.0.0:0");
cluster.startDataNodes(conf,1,true,StartupOption.REGULAR,null,null,null,false,true);
dns=cluster.getDataNodes();
dn=dns.get(0);
selfSocketAddr=dn.getXferAddress().toString();
System.out.println("DN Self Socket Addr == " + selfSocketAddr);
assertTrue(selfSocketAddr.contains("/0.0.0.0:"));
cluster.shutdown();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Make sure that client failover works when an active NN dies and the standby
 * takes over.
 */
@Test public void testDfsClientFailover() throws IOException, URISyntaxException {
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
DFSTestUtil.createFile(fs,TEST_FILE,FILE_LENGTH_TO_VERIFY,(short)1,1L);
// BUG FIX: assertEquals takes (expected, actual); the arguments were swapped,
// which would have produced a misleading message on failure
assertEquals(FILE_LENGTH_TO_VERIFY,fs.getFileStatus(TEST_FILE).getLen());
// kill the active NN and promote the standby; the client must fail over
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
assertEquals(FILE_LENGTH_TO_VERIFY,fs.getFileStatus(TEST_FILE).getLen());
// a logical URI with an explicit port must also resolve through failover
Path withPort=new Path("hdfs://" + HATestUtil.getLogicalHostname(cluster) + ":"+ NameNode.DEFAULT_PORT+ "/"+ TEST_FILE.toUri().getPath());
FileSystem fs2=withPort.getFileSystem(fs.getConf());
assertTrue(fs2.exists(withPort));
fs.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test to verify legacy proxy providers are correctly wrapped.
 */
@Test public void testWrappedFailoverProxyProvider() throws Exception {
Configuration haConfig=new HdfsConfiguration(conf);
String logicalName=HATestUtil.getLogicalHostname(cluster);
HATestUtil.setFailoverConfigurations(cluster,haConfig,logicalName);
// install a legacy (non-URI-aware) provider under the logical name
haConfig.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,DummyLegacyFailoverProxyProvider.class.getName());
Path logicalPath=new Path("hdfs://" + logicalName + "/");
SecurityUtil.setTokenServiceUseIp(false);
// the wrapped legacy provider must still resolve through the logical URI
assertTrue("Legacy proxy providers should use logical URI.",HAUtil.useLogicalUri(haConfig,logicalPath.toUri()));
}
APIUtilityVerifier BooleanVerifier
/**
 * Test that a DFSClient waits for random time before retry on busy blocks.
 * Four scenarios are run: with few retries the test may fail (logged but
 * tolerated); with many retries it must succeed, for both a short and a
 * long retry time window.
 */
@Test public void testDFSClientRetriesOnBusyBlocks() throws IOException {
System.out.println("Testing DFSClient random waiting on busy blocks.");
int xcievers=2;
int fileLen=6 * 1024 * 1024;
int threads=50;
int retries=3;
int timeWin=300;
// scenario 1: few retries, short window — failure is tolerated
long timestamp=Time.now();
boolean pass=busyTest(xcievers,threads,fileLen,timeWin,retries);
long timestamp2=Time.now();
if (pass) {
LOG.info("Test 1 succeeded! Time spent: " + (timestamp2 - timestamp) / 1000.0 + " sec.");
}
else {
LOG.warn("Test 1 failed, but relax. Time spent: " + (timestamp2 - timestamp) / 1000.0 + " sec.");
}
// scenario 2: many retries, short window — must succeed
retries=50;
timestamp=Time.now();
pass=busyTest(xcievers,threads,fileLen,timeWin,retries);
timestamp2=Time.now();
// BUG FIX: corrected "maxmum" -> "maximum" in the assertion message
assertTrue("Something wrong! Test 2 got Exception with maximum retries!",pass);
LOG.info("Test 2 succeeded! Time spent: " + (timestamp2 - timestamp) / 1000.0 + " sec.");
// scenario 3: few retries, long window — failure is tolerated
retries=3;
timeWin=1000;
timestamp=Time.now();
pass=busyTest(xcievers,threads,fileLen,timeWin,retries);
timestamp2=Time.now();
if (pass) {
LOG.info("Test 3 succeeded! Time spent: " + (timestamp2 - timestamp) / 1000.0 + " sec.");
}
else {
LOG.warn("Test 3 failed, but relax. Time spent: " + (timestamp2 - timestamp) / 1000.0 + " sec.");
}
// scenario 4: many retries, long window — must succeed
retries=50;
timeWin=1000;
timestamp=Time.now();
pass=busyTest(xcievers,threads,fileLen,timeWin,retries);
timestamp2=Time.now();
assertTrue("Something wrong! Test 4 got Exception with maximum retries!",pass);
LOG.info("Test 4 succeeded! Time spent: " + (timestamp2 - timestamp) / 1000.0 + " sec.");
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that getAdditionalBlock() and close() are idempotent. This allows
 * a client to safely retry a call and still produce a correct
 * file. See HDFS-3031.
 */
@Test public void testIdempotentAllocateBlockAndClose() throws Exception {
final String src="/testIdempotentAllocateBlock";
Path file=new Path(src);
// Small block size so the write below allocates several blocks.
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,4096);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
// Spy on the NN RPC interface so addBlock()/complete() can be invoked
// twice per client call, simulating a retried (duplicate) RPC.
NamenodeProtocols preSpyNN=cluster.getNameNodeRpc();
NamenodeProtocols spyNN=spy(preSpyNN);
DFSClient client=new DFSClient(null,spyNN,conf,null);
// addBlock(): call the real method twice back-to-back. Both calls must
// return the same last block, and the second call must not grow the file
// (same block count) -- i.e. the allocation is idempotent.
doAnswer(new Answer(){
@Override public LocatedBlock answer( InvocationOnMock invocation) throws Throwable {
LocatedBlock ret=(LocatedBlock)invocation.callRealMethod();
LocatedBlocks lb=cluster.getNameNodeRpc().getBlockLocations(src,0,Long.MAX_VALUE);
int blockCount=lb.getLocatedBlocks().size();
assertEquals(lb.getLastLocatedBlock().getBlock(),ret.getBlock());
// Simulated retry: re-invoke and re-check invariants.
LocatedBlock ret2=(LocatedBlock)invocation.callRealMethod();
lb=cluster.getNameNodeRpc().getBlockLocations(src,0,Long.MAX_VALUE);
int blockCount2=lb.getLocatedBlocks().size();
assertEquals(lb.getLastLocatedBlock().getBlock(),ret2.getBlock());
assertEquals(blockCount,blockCount2);
return ret2;
}
}
).when(spyNN).addBlock(Mockito.anyString(),Mockito.anyString(),Mockito.any(),Mockito.any(),Mockito.anyLong(),Mockito.any());
// complete(): if the real call succeeds, issue a second real call to fake a
// retried complete() RPC; the retry must also succeed (idempotence).
doAnswer(new Answer(){
@Override public Boolean answer( InvocationOnMock invocation) throws Throwable {
LOG.info("Called complete(: " + Joiner.on(",").join(invocation.getArguments()) + ")");
if (!(Boolean)invocation.callRealMethod()) {
LOG.info("Complete call returned false, not faking a retry RPC");
return false;
}
try {
boolean ret=(Boolean)invocation.callRealMethod();
LOG.info("Complete call returned true, faked second RPC. " + "Returned: " + ret);
return ret;
}
catch ( Throwable t) {
LOG.error("Idempotent retry threw exception",t);
throw t;
}
}
}
).when(spyNN).complete(Mockito.anyString(),Mockito.anyString(),Mockito.any(),anyLong());
// Write through the spied client; the duplicated RPCs happen transparently.
OutputStream stm=client.create(file.toString(),true);
try {
AppendTestUtil.write(stm,0,10000);
stm.close();
stm=null;
}
finally {
IOUtils.cleanup(LOG,stm);
}
// Sanity: both stubbed RPCs were actually exercised, and the file content
// survived the faked retries intact.
Mockito.verify(spyNN,Mockito.atLeastOnce()).addBlock(Mockito.anyString(),Mockito.anyString(),Mockito.any(),Mockito.any(),Mockito.anyLong(),Mockito.any());
Mockito.verify(spyNN,Mockito.atLeastOnce()).complete(Mockito.anyString(),Mockito.anyString(),Mockito.any(),anyLong());
AppendTestUtil.check(fs,file,10000);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Test that checksum failures are recovered from by the next read on the same
 * DFSInputStream. Corruption information is not persisted from read call to
 * read call, so the client should expect consecutive calls to behave the same
 * way. See HDFS-3067.
 */
@Test public void testRetryOnChecksumFailure() throws Exception {
HdfsConfiguration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
final short REPL_FACTOR=1;
final long FILE_LENGTH=512L;
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
Path path=new Path("/corrupted");
// Create a single-replica file, then corrupt its only block on disk so
// every read is guaranteed to hit a checksum mismatch.
DFSTestUtil.createFile(fs,path,FILE_LENGTH,REPL_FACTOR,12345L);
DFSTestUtil.waitReplication(fs,path,REPL_FACTOR);
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,path);
int blockFilesCorrupted=cluster.corruptBlockOnDataNodes(block);
assertEquals("All replicas not corrupted",REPL_FACTOR,blockFilesCorrupted);
InetSocketAddress nnAddr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(nnAddr,conf);
DFSInputStream dis=client.open(path.toString());
byte[] arr=new byte[(int)FILE_LENGTH];
// Read twice on the SAME stream: each read must independently detect the
// corruption, since checksum state is not carried between read calls.
for (int i=0; i < 2; ++i) {
try {
dis.read(arr,0,(int)FILE_LENGTH);
fail("Expected ChecksumException not thrown");
}
catch ( Exception ex) {
// NOTE(review): catches broad Exception; the message assertion below is
// what pins the failure to a checksum error.
GenericTestUtils.assertExceptionContains("Checksum error",ex);
}
}
}
finally {
// NOTE(review): dis/client are not closed explicitly; cluster shutdown
// reclaims their resources.
cluster.shutdown();
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * This tests that DFSInputStream failures are counted for a given read
 * operation, and not over the lifetime of the stream. It is a regression
 * test for HDFS-127.
 */
@Test public void testFailuresArePerOperation() throws Exception {
long fileSize=4096;
Path file=new Path("/testFile");
// Shrink the retry window and socket timeout so the injected failures
// resolve quickly instead of stretching the test out.
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10);
conf.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,2 * 1000);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
// Spy the NN RPC layer so getBlockLocations() can be made to fail a
// controlled number of times (see FailNTimesAnswer).
NamenodeProtocols preSpyNN=cluster.getNameNodeRpc();
NamenodeProtocols spyNN=spy(preSpyNN);
DFSClient client=new DFSClient(null,spyNN,conf,null);
int maxBlockAcquires=client.getMaxBlockAcquireFailures();
assertTrue(maxBlockAcquires > 0);
DFSTestUtil.createFile(fs,file,fileSize,(short)1,12345L);
// Case 1: one more failure than the client tolerates -- the read must fail.
doAnswer(new FailNTimesAnswer(preSpyNN,maxBlockAcquires + 1)).when(spyNN).getBlockLocations(anyString(),anyLong(),anyLong());
try {
IOUtils.copyBytes(client.open(file.toString()),new IOUtils.NullOutputStream(),conf,true);
fail("Didn't get exception");
}
catch ( IOException ioe) {
DFSClient.LOG.info("Got expected exception",ioe);
}
// Case 2: exactly the tolerated number of failures -- the read must succeed.
doAnswer(new FailNTimesAnswer(preSpyNN,maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(),anyLong(),anyLong());
IOUtils.copyBytes(client.open(file.toString()),new IOUtils.NullOutputStream(),conf,true);
// Case 3 (the HDFS-127 regression): after a first read that consumed its
// failure budget, a SECOND read on the same stream gets a fresh budget.
DFSClient.LOG.info("Starting test case for failure reset");
doAnswer(new FailNTimesAnswer(preSpyNN,maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(),anyLong(),anyLong());
DFSInputStream is=client.open(file.toString());
byte buf[]=new byte[10];
IOUtils.readFully(is,buf,0,buf.length);
DFSClient.LOG.info("First read successful after some failures.");
// Re-arm the failing answer and re-read from offset 0: failure counting
// must restart per-operation, so this read also succeeds.
doAnswer(new FailNTimesAnswer(preSpyNN,maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(),anyLong(),anyLong());
is.openInfo();
is.seek(0);
IOUtils.readFully(is,buf,0,buf.length);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * The close() method of DFSOutputStream should never throw the same exception
 * twice. See HDFS-5335 for details.
 */
@Test public void testCloseTwice() throws IOException {
  DistributedFileSystem fs=cluster.getFileSystem();
  FSDataOutputStream os=fs.create(new Path("/test"));
  // Reach into the wrapped DFSOutputStream and its stored lastException.
  DFSOutputStream dos=(DFSOutputStream)Whitebox.getInternalState(os,"wrappedStream");
  @SuppressWarnings("unchecked") AtomicReference ex=(AtomicReference)Whitebox.getInternalState(dos,"lastException");
  Assert.assertEquals(null,ex.get());
  dos.close();
  // Inject a fake "last exception": the next close() must throw it.
  IOException dummy=new IOException("dummy");
  ex.set(dummy);
  try {
    dos.close();
    // FIX: previously the test passed silently if close() did not throw.
    Assert.fail("Expected close() to throw the injected IOException");
  }
  catch ( IOException e) {
    // FIX: expected value goes first in assertEquals (was reversed).
    Assert.assertEquals(dummy,e);
  }
  // Once thrown, the stored exception must be cleared so a further close()
  // does not rethrow it -- the point of HDFS-5335.
  Assert.assertEquals(null,ex.get());
  dos.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creates a batch of files, deletes them all, and verifies that the cluster's
 * reported DFS usage returns to its starting value once the deletions have
 * been processed.
 */
@Test public void testRemove() throws Exception {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    FileSystem fs=cluster.getFileSystem();
    assertTrue(fs.mkdirs(dir));
    long usedBefore=getTotalDfsUsed(cluster);
    final int numFiles=100;
    // Populate the directory, recording peak usage afterwards.
    for (int idx=0; idx < numFiles; idx++) {
      createFile(fs,new Path(dir,"a" + idx));
    }
    long usedPeak=getTotalDfsUsed(cluster);
    // Delete every file that was just created.
    for (int idx=0; idx < numFiles; idx++) {
      fs.delete(new Path(dir,"a" + idx),false);
    }
    // Give the datanodes a few heartbeat intervals to report freed blocks.
    Thread.sleep(3 * DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000);
    long usedAfter=getTotalDfsUsed(cluster);
    assertEquals("All blocks should be gone. start=" + usedBefore + " max="+ usedPeak+ " final="+ usedAfter,usedBefore,usedAfter);
    fs.delete(dir,true);
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Check the blocks of dst file are cleaned after rename with overwrite
 */
@Test(timeout=120000) public void testRenameWithOverwrite() throws Exception {
  final short repl=2;
  final long blkSize=512;
  Configuration conf=new Configuration();
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(repl).build();
  DistributedFileSystem dfs=cluster.getFileSystem();
  try {
    final long len=blkSize * 3;
    final String src="/foo/src";
    final String dst="/foo/dst";
    final Path srcPath=new Path(src);
    final Path dstPath=new Path(dst);
    // Both source and destination get real blocks on the datanodes.
    DFSTestUtil.createFile(dfs,srcPath,len,repl,1);
    DFSTestUtil.createFile(dfs,dstPath,len,repl,1);
    // Capture the destination's block list before it is overwritten.
    LocatedBlocks lbs=NameNodeAdapter.getBlockLocations(cluster.getNameNode(),dst,0,len);
    BlockManager bm=NameNodeAdapter.getNamesystem(cluster.getNameNode()).getBlockManager();
    assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().getLocalBlock()) != null);
    // The overwriting rename must purge the old destination blocks from the
    // block manager.
    dfs.rename(srcPath,dstPath,Rename.OVERWRITE);
    assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().getLocalBlock()) == null);
  }
  finally {
    if (dfs != null) {
      dfs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * This test attempts to rollback the NameNode and DataNode under
 * a number of valid and invalid conditions.
 */
@Test public void testRollback() throws Exception {
File[] baseDirs;
UpgradeUtilities.initialize();
StorageInfo storageInfo=null;
// Each scenario below is run once with 1 and once with 2 storage dirs.
// Scenarios are strictly ordered: each one rebuilds then empties its dirs,
// so the starting state of every phase depends on the cleanup before it.
for (int numDirs=1; numDirs <= 2; numDirs++) {
conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,-1);
conf=UpgradeUtilities.initializeStorageStateConf(numDirs,conf);
String[] nameNodeDirs=conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
String[] dataNodeDirs=conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
// Scenario 1: NN rollback with valid current + previous dirs must work.
log("Normal NameNode rollback",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
NameNode.doRollback(conf,false);
checkResult(NAME_NODE,nameNodeDirs);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
// Scenario 2: DN rollback with valid current + previous dirs must work.
log("Normal DataNode rollback",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
NameNode.doRollback(conf,false);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).dnStartupOption(StartupOption.ROLLBACK).build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"previous");
cluster.startDataNodes(conf,1,false,StartupOption.ROLLBACK,null);
checkResult(DATA_NODE,dataNodeDirs);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
// Scenario 3: block-pool-level rollback; the previous block pool carries a
// VERSION file one layout version behind the current software.
log("Normal BlockPool rollback",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
NameNode.doRollback(conf,false);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).dnStartupOption(StartupOption.ROLLBACK).build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs,"current",UpgradeUtilities.getCurrentBlockPoolID(cluster));
UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs,"previous",UpgradeUtilities.getCurrentBlockPoolID(cluster));
storageInfo=new StorageInfo(HdfsConstants.DATANODE_LAYOUT_VERSION - 1,UpgradeUtilities.getCurrentNamespaceID(cluster),UpgradeUtilities.getCurrentClusterID(cluster),UpgradeUtilities.getCurrentFsscTime(cluster),NodeType.DATA_NODE);
File[] dataCurrentDirs=new File[dataNodeDirs.length];
for (int i=0; i < dataNodeDirs.length; i++) {
dataCurrentDirs[i]=new File((new Path(dataNodeDirs[i] + "/current")).toString());
}
UpgradeUtilities.createDataNodeVersionFile(dataCurrentDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster));
cluster.startDataNodes(conf,1,false,StartupOption.ROLLBACK,null);
assertTrue(cluster.isDataNodeUp());
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
// Scenario 4: NN rollback with no previous dir must be rejected.
log("NameNode rollback without existing previous dir",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
startNameNodeShouldFail("None of the storage directories contain previous fs state");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
// Scenario 5: DN rollback with no previous dir is tolerated (DN just
// starts normally; no assertion beyond clean startup/shutdown).
log("DataNode rollback without existing previous dir",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).startupOption(StartupOption.UPGRADE).build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
cluster.startDataNodes(conf,1,false,StartupOption.ROLLBACK,null);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
// Scenario 6: previous dir advertises an impossibly-new layout version
// (Integer.MIN_VALUE; layout versions are negative and decrease) -- the
// block pool must refuse to roll back.
log("DataNode rollback with future stored layout version in previous",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
NameNode.doRollback(conf,false);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).dnStartupOption(StartupOption.ROLLBACK).build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
baseDirs=UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"previous");
storageInfo=new StorageInfo(Integer.MIN_VALUE,UpgradeUtilities.getCurrentNamespaceID(cluster),UpgradeUtilities.getCurrentClusterID(cluster),UpgradeUtilities.getCurrentFsscTime(cluster),NodeType.DATA_NODE);
UpgradeUtilities.createDataNodeVersionFile(baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster));
startBlockPoolShouldFail(StartupOption.ROLLBACK,cluster.getNamesystem().getBlockPoolId());
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
// Scenario 7: previous dir carries a newer fsscTime (Long.MAX_VALUE) than
// the namespace -- rollback must be refused.
log("DataNode rollback with newer fsscTime in previous",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
NameNode.doRollback(conf,false);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).dnStartupOption(StartupOption.ROLLBACK).build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
baseDirs=UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"previous");
storageInfo=new StorageInfo(HdfsConstants.DATANODE_LAYOUT_VERSION,UpgradeUtilities.getCurrentNamespaceID(cluster),UpgradeUtilities.getCurrentClusterID(cluster),Long.MAX_VALUE,NodeType.DATA_NODE);
UpgradeUtilities.createDataNodeVersionFile(baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster));
startBlockPoolShouldFail(StartupOption.ROLLBACK,cluster.getNamesystem().getBlockPoolId());
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
// Scenario 8: previous dir missing edit logs -- NN rollback must fail.
log("NameNode rollback with no edits file",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
deleteMatchingFiles(baseDirs,"edits.*");
startNameNodeShouldFail("Gap in transactions");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
// Scenario 9: previous dir missing fsimage -- NN rollback must fail.
log("NameNode rollback with no image file",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
deleteMatchingFiles(baseDirs,"fsimage_.*");
startNameNodeShouldFail("No valid image files found");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
// Scenario 10: corrupt the layoutVersion key inside VERSION -- must fail.
log("NameNode rollback with corrupt version file",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
for ( File f : baseDirs) {
UpgradeUtilities.corruptFile(new File(f,"VERSION"),"layoutVersion".getBytes(Charsets.UTF_8),"xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
}
startNameNodeShouldFail("file VERSION has layoutVersion missing");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
// Scenario 11: previous dir carries ancient layout version 1 -- this
// software cannot roll back to it, so startup must fail.
log("NameNode rollback with old layout version in previous",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
storageInfo=new StorageInfo(1,UpgradeUtilities.getCurrentNamespaceID(null),UpgradeUtilities.getCurrentClusterID(null),UpgradeUtilities.getCurrentFsscTime(null),NodeType.NAME_NODE);
UpgradeUtilities.createNameNodeVersionFile(conf,baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster));
startNameNodeShouldFail("Cannot rollback to storage version 1 using this version");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier ConditionMatcher HybridVerifier
/**
 * Verifies that "hdfs dfs -appendToFile" concatenates multiple local files
 * onto a remote file: the first run creates it at 2x the input length, the
 * second run appends the same data again for 4x.
 */
@Test(timeout=300000) public void testAppendToFile() throws Exception {
  final int inputFileLength=1024 * 1024;
  // Stage two equally-sized local input files.
  File appendDir=new File(TEST_ROOT_DIR,"testAppendtoFileDir");
  appendDir.mkdirs();
  File localA=new File(appendDir,"file1");
  File localB=new File(appendDir,"file2");
  createLocalFileWithRandomData(inputFileLength,localA);
  createLocalFileWithRandomData(inputFileLength,localB);
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  try {
    FileSystem dfs=cluster.getFileSystem();
    assertTrue("Not a HDFS: " + dfs.getUri(),dfs instanceof DistributedFileSystem);
    Path remoteFile=new Path("/remoteFile");
    FsShell shell=new FsShell();
    shell.setConf(conf);
    String[] argv={"-appendToFile",localA.toString(),localB.toString(),remoteFile.toString()};
    // First invocation creates the remote file from both local inputs.
    int res=ToolRunner.run(shell,argv);
    assertThat(res,is(0));
    assertThat(dfs.getFileStatus(remoteFile).getLen(),is((long)inputFileLength * 2));
    // Second invocation appends the same inputs, doubling the length again.
    res=ToolRunner.run(shell,argv);
    assertThat(res,is(0));
    assertThat(dfs.getFileStatus(remoteFile).getLen(),is((long)inputFileLength * 4));
  }
  finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises "hdfs dfs -get": a clean copy matches the local original (with
 * and without -ignoreCrc); after the on-disk block is corrupted, a plain
 * -get must fail while -get -ignoreCrc returns the corrupted bytes.
 */
@Test(timeout=30000) public void testGet() throws IOException {
  DFSTestUtil.setLogLevel2All(FSInputChecker.LOG);
  final String fname="testGet.txt";
  Path root=new Path("/test/get");
  final Path remotef=new Path(root,fname);
  final Configuration conf=new HdfsConfiguration();
  // Short retry window so corrupted-read retries do not stall the test.
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10);
  // Each run() invocation copies remotef to a fresh local destination and
  // asserts the shell exit code; it returns the file content on success.
  TestGetRunner runner=new TestGetRunner(){
    private int count=0;
    private final FsShell shell=new FsShell(conf);
    public String run( int exitcode, String... options) throws IOException {
      String dst=new File(TEST_ROOT_DIR,fname + ++count).getAbsolutePath();
      String[] args=new String[options.length + 3];
      args[0]="-get";
      args[args.length - 2]=remotef.toString();
      args[args.length - 1]=dst;
      for (int i=0; i < options.length; i++) {
        args[i + 1]=options[i];
      }
      show("args=" + Arrays.asList(args));
      try {
        assertEquals(exitcode,shell.run(args));
      }
      catch ( Exception e) {
        // FIX: fail(msg) instead of the assertTrue(msg, false) anti-pattern.
        fail(StringUtils.stringifyException(e));
      }
      return exitcode == 0 ? DFSTestUtil.readFile(new File(dst)) : null;
    }
  }
  ;
  File localf=createLocalFile(new File(TEST_ROOT_DIR,fname));
  MiniDFSCluster cluster=null;
  DistributedFileSystem dfs=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
    dfs=cluster.getFileSystem();
    mkdir(dfs,root);
    dfs.copyFromLocalFile(false,false,new Path(localf.getPath()),remotef);
    String localfcontent=DFSTestUtil.readFile(localf);
    // Clean copies must match, with or without checksum verification.
    assertEquals(localfcontent,runner.run(0));
    assertEquals(localfcontent,runner.run(0,"-ignoreCrc"));
    // Corrupt the block files, restarting the cluster around the corruption.
    List files=getBlockFiles(cluster);
    dfs.close();
    cluster.shutdown();
    show("files=" + files);
    corrupt(files);
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).format(false).build();
    dfs=cluster.getFileSystem();
    // Checksum-verified get must now fail (exit code 1, null content).
    assertEquals(null,runner.run(1));
    String corruptedcontent=runner.run(0,"-ignoreCrc");
    // corrupt() bumps the first byte by one; the remainder is untouched.
    assertEquals(localfcontent.substring(1),corruptedcontent.substring(1));
    assertEquals(localfcontent.charAt(0) + 1,corruptedcontent.charAt(0));
  }
  finally {
    if (null != dfs) {
      try {
        dfs.close();
      }
      catch ( Exception e) {
      }
    }
    if (null != cluster) {
      cluster.shutdown();
    }
    localf.delete();
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test to make sure that user namespace xattrs can be set only if path has
 * access and for sticky directorries, only owner/privileged user can write.
 * Trusted namespace xattrs can be set only with privileged users.
 * As user1: Create a directory (/foo) as user1, chown it to user1 (and
 * user1's group), grant rwx to "other".
 * As user2: Set an xattr (should pass with path access).
 * As user1: Set an xattr (should pass).
 * As user2: Read the xattr (should pass). Remove the xattr (should pass with
 * path access).
 * As user1: Read the xattr (should pass). Remove the xattr (should pass).
 * As user1: Change permissions only to owner
 * As User2: Set an Xattr (Should fail set with no path access) Remove an
 * Xattr (Should fail with no path access)
 * As SuperUser: Set an Xattr with Trusted (Should pass)
 */
@Test(timeout=30000) public void testSetXAttrPermissionAsDifferentOwner() throws Exception {
final String USER1="user1";
final String GROUP1="supergroup";
final UserGroupInformation user1=UserGroupInformation.createUserForTesting(USER1,new String[]{GROUP1});
final UserGroupInformation user2=UserGroupInformation.createUserForTesting("user2",new String[]{"mygroup2"});
final UserGroupInformation SUPERUSER=UserGroupInformation.getCurrentUser();
MiniDFSCluster cluster=null;
PrintStream bak=null;
try {
final Configuration conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final FileSystem fs=cluster.getFileSystem();
// Make user1 the owner of / so it can create and chmod /foo below.
fs.setOwner(new Path("/"),USER1,GROUP1);
// Capture stderr so shell error output (e.g. "Permission denied") can be
// asserted on; restored in the finally block.
bak=System.err;
final FsShell fshell=new FsShell(conf);
final ByteArrayOutputStream out=new ByteArrayOutputStream();
System.setErr(new PrintStream(out));
// user1: create /foo.
user1.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final int ret=ToolRunner.run(fshell,new String[]{"-mkdir","/foo"});
assertEquals("Return should be 0",0,ret);
out.reset();
return null;
}
}
);
// user1: chmod 707 -- full access for owner and "other", none for group.
user1.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final int ret=ToolRunner.run(fshell,new String[]{"-chmod","707","/foo"});
assertEquals("Return should be 0",0,ret);
out.reset();
return null;
}
}
);
// user2: set user.a1 -- allowed via the "other" rwx bits.
user2.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"});
assertEquals("Returned should be 0",0,ret);
out.reset();
return null;
}
}
);
// user1 (owner): set user.a1 -- allowed.
user1.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"});
assertEquals("Returned should be 0",0,ret);
out.reset();
return null;
}
}
);
// user2: read then remove user.a1 -- both allowed via path access.
user2.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
int ret=ToolRunner.run(fshell,new String[]{"-getfattr","-n","user.a1","/foo"});
assertEquals("Returned should be 0",0,ret);
out.reset();
ret=ToolRunner.run(fshell,new String[]{"-setfattr","-x","user.a1","/foo"});
assertEquals("Returned should be 0",0,ret);
out.reset();
return null;
}
}
);
// NOTE(review): per the javadoc this step should read and remove the xattr
// as user1, but the action body is empty -- that part of the scenario is
// effectively untested. TODO confirm intent / restore the assertions.
user1.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
return null;
}
}
);
// user1: chmod 700 -- revoke all access from non-owners.
user1.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final int ret=ToolRunner.run(fshell,new String[]{"-chmod","700","/foo"});
assertEquals("Return should be 0",0,ret);
out.reset();
return null;
}
}
);
// user2: set user.a2 -- must now fail with "Permission denied" on stderr.
user2.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a2","/foo"});
assertEquals("Returned should be 1",1,ret);
final String str=out.toString();
assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1);
out.reset();
return null;
}
}
);
// user2: remove user.a2 -- must also fail with "Permission denied".
user2.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-x","user.a2","/foo"});
assertEquals("Returned should be 1",1,ret);
final String str=out.toString();
assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1);
out.reset();
return null;
}
}
);
// superuser: setting a trusted.* xattr is allowed for privileged users.
SUPERUSER.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","trusted.a3","/foo"});
assertEquals("Returned should be 0",0,ret);
out.reset();
return null;
}
}
);
}
finally {
if (bak != null) {
System.setErr(bak);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier ConditionMatcher HybridVerifier
/**
 * Verifies that "hdfs dfs -appendToFile" rejects malformed invocations:
 * too few arguments, and mixing a local file with "-" (stdin) as sources.
 */
@Test(timeout=300000) public void testAppendToFileBadArgs() throws Exception {
  final int inputFileLength=1024 * 1024;
  File badArgsDir=new File(TEST_ROOT_DIR,"testAppendToFileBadArgsDir");
  badArgsDir.mkdirs();
  File localSrc=new File(badArgsDir,"file1");
  createLocalFileWithRandomData(inputFileLength,localSrc);
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  try {
    FileSystem dfs=cluster.getFileSystem();
    assertTrue("Not a HDFS: " + dfs.getUri(),dfs instanceof DistributedFileSystem);
    FsShell shell=new FsShell();
    shell.setConf(conf);
    // Missing destination: only one path supplied -- must fail.
    String[] argv={"-appendToFile",localSrc.toString()};
    int res=ToolRunner.run(shell,argv);
    assertThat(res,not(0));
    // "-" (stdin) mixed with a regular local source -- must fail.
    Path remoteFile=new Path("/remoteFile");
    argv=new String[]{"-appendToFile",localSrc.toString(),"-",remoteFile.toString()};
    res=ToolRunner.run(shell,argv);
    assertThat(res,not(0));
  }
  finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Exercises "-put"/copyFromLocalFile concurrency and multi-source variants:
 * while f1 is being copied to dst, a second thread copies f2 to the same dst
 * and must fail; then multi-file copyFromLocalFile and moveFromLocalFile are
 * checked.
 */
@Test(timeout=30000) public void testPut() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs=cluster.getFileSystem();
assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
final DistributedFileSystem dfs=(DistributedFileSystem)fs;
try {
// Remove stale checksum files from earlier runs so the copies are clean.
new File(TEST_ROOT_DIR,".f1.crc").delete();
new File(TEST_ROOT_DIR,".f2.crc").delete();
final File f1=createLocalFile(new File(TEST_ROOT_DIR,"f1"));
final File f2=createLocalFile(new File(TEST_ROOT_DIR,"f2"));
final Path root=mkdir(dfs,new Path("/test/put"));
final Path dst=new Path(root,"dst");
show("begin");
// This thread copies f2 onto dst while the main thread's copy of f1 is in
// flight; the concurrent copy is expected to throw IOException. Reaching
// assertTrue(false) means it wrongly succeeded.
final Thread copy2ndFileThread=new Thread(){
@Override public void run(){
try {
show("copy local " + f2 + " to remote "+ dst);
dfs.copyFromLocalFile(false,false,new Path(f2.getPath()),dst);
}
catch ( IOException ioe) {
show("good " + StringUtils.stringifyException(ioe));
return;
}
assertTrue(false);
}
}
;
// Race injection: a custom SecurityManager watches every permission check;
// the first time a non-DataNode thread is inside FileUtil.copyContent, it
// starts the competing copy and sleeps 5s so the two copies overlap.
SecurityManager sm=System.getSecurityManager();
System.out.println("SecurityManager = " + sm);
System.setSecurityManager(new SecurityManager(){
private boolean firstTime=true;
@Override public void checkPermission( Permission perm){
if (firstTime) {
Thread t=Thread.currentThread();
if (!t.toString().contains("DataNode")) {
String s="" + Arrays.asList(t.getStackTrace());
if (s.contains("FileUtil.copyContent")) {
firstTime=false;
copy2ndFileThread.start();
try {
Thread.sleep(5000);
}
catch ( InterruptedException e) {
}
}
}
}
}
}
);
show("copy local " + f1 + " to remote "+ dst);
dfs.copyFromLocalFile(false,false,new Path(f1.getPath()),dst);
show("done");
try {
copy2ndFileThread.join();
}
catch ( InterruptedException e) {
}
// Restore the original SecurityManager before the remaining checks.
System.setSecurityManager(sm);
// Multi-source put: both files must land under the destination directory.
final Path destmultiple=mkdir(dfs,new Path("/test/putmultiple"));
Path[] srcs=new Path[2];
srcs[0]=new Path(f1.getPath());
srcs[1]=new Path(f2.getPath());
dfs.copyFromLocalFile(false,false,srcs,destmultiple);
srcs[0]=new Path(destmultiple,"f1");
srcs[1]=new Path(destmultiple,"f2");
assertTrue(dfs.exists(srcs[0]));
assertTrue(dfs.exists(srcs[1]));
// Multi-source move: files appear remotely and the local copies are gone.
final Path destmultiple2=mkdir(dfs,new Path("/test/movemultiple"));
srcs[0]=new Path(f1.getPath());
srcs[1]=new Path(f2.getPath());
dfs.moveFromLocalFile(srcs,destmultiple2);
assertFalse(f1.exists());
assertFalse(f2.exists());
srcs[0]=new Path(destmultiple2,"f1");
srcs[1]=new Path(destmultiple2,"f2");
assertTrue(dfs.exists(srcs[0]));
assertTrue(dfs.exists(srcs[1]));
f1.delete();
f2.delete();
}
finally {
try {
dfs.close();
}
catch ( Exception e) {
}
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Runs "hdfs dfs -du /test/dir" over two freshly written files and checks
 * both the exit code and that the captured stdout report contains the
 * expected per-file sizes.
 */
@Test(timeout=30000) public void testDu() throws IOException {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  DistributedFileSystem fs=cluster.getFileSystem();
  // Redirect stdout so the -du report can be inspected; restored in finally.
  PrintStream psBackup=System.out;
  ByteArrayOutputStream out=new ByteArrayOutputStream();
  PrintStream psOut=new PrintStream(out);
  System.setOut(psOut);
  FsShell shell=new FsShell();
  shell.setConf(conf);
  try {
    Path myPath=new Path("/test/dir");
    assertTrue(fs.mkdirs(myPath));
    assertTrue(fs.exists(myPath));
    Path myFile=new Path("/test/dir/file");
    writeFile(fs,myFile);
    assertTrue(fs.exists(myFile));
    Path myFile2=new Path("/test/dir/file2");
    writeFile(fs,myFile2);
    assertTrue(fs.exists(myFile2));
    String[] args=new String[2];
    args[0]="-du";
    args[1]="/test/dir";
    int val=-1;
    try {
      val=shell.run(args);
    }
    catch ( Exception e) {
      System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
    }
    // FIX: assertEquals reports the actual exit code on failure, unlike the
    // original assertTrue(val == 0).
    assertEquals(0,val);
    String returnString=out.toString();
    out.reset();
    // NOTE(review): "22"/"23" appear tied to writeFile's fixed content size;
    // fragile if writeFile changes -- confirm against the helper.
    assertTrue(returnString.contains("22"));
    assertTrue(returnString.contains("23"));
  }
  finally {
    System.setOut(psBackup);
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Runs FsShell commands against a SECOND cluster's fully-qualified URIs
 * (shell.conf points at srcCluster, paths point at dstCluster) to verify
 * that -ls/-rmr/-du/-put/-cp/-cat/-chgrp/-chown all work cross-filesystem,
 * and that bare "hdfs:///" paths resolve against the default filesystem.
 */
@Test(timeout=30000) public void testURIPaths() throws Exception {
Configuration srcConf=new HdfsConfiguration();
Configuration dstConf=new HdfsConfiguration();
MiniDFSCluster srcCluster=null;
MiniDFSCluster dstCluster=null;
// Separate base dir keeps the two clusters' storage from colliding.
File bak=new File(PathUtils.getTestDir(getClass()),"dfs_tmp_uri");
bak.mkdirs();
try {
srcCluster=new MiniDFSCluster.Builder(srcConf).numDataNodes(2).build();
dstConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,bak.getAbsolutePath());
dstCluster=new MiniDFSCluster.Builder(dstConf).numDataNodes(2).build();
FileSystem srcFs=srcCluster.getFileSystem();
FileSystem dstFs=dstCluster.getFileSystem();
// Note: the shell is configured with srcConf, so any dstFs access below
// goes through the explicit remote URI, not the default filesystem.
FsShell shell=new FsShell();
shell.setConf(srcConf);
// -ls against the remote cluster's root URI.
String[] argv=new String[2];
argv[0]="-ls";
argv[1]=dstFs.getUri().toString() + "/";
int ret=ToolRunner.run(shell,argv);
assertEquals("ls works on remote uri ",0,ret);
// -rmr against a remote directory.
dstFs.mkdirs(new Path("/hadoopdir"));
argv=new String[2];
argv[0]="-rmr";
argv[1]=dstFs.getUri().toString() + "/hadoopdir";
ret=ToolRunner.run(shell,argv);
assertEquals("-rmr works on remote uri " + argv[1],0,ret);
// -du against the remote root.
argv[0]="-du";
argv[1]=dstFs.getUri().toString() + "/";
ret=ToolRunner.run(shell,argv);
assertEquals("du works on remote uri ",0,ret);
// -put a local file (file: URI) to the remote cluster.
File furi=new File(TEST_ROOT_DIR,"furi");
createLocalFile(furi);
argv=new String[3];
argv[0]="-put";
argv[1]=furi.toURI().toString();
argv[2]=dstFs.getUri().toString() + "/furi";
ret=ToolRunner.run(shell,argv);
assertEquals(" put is working ",0,ret);
// -cp from the remote cluster back to the shell's own (src) cluster.
argv[0]="-cp";
argv[1]=dstFs.getUri().toString() + "/furi";
argv[2]=srcFs.getUri().toString() + "/furi";
ret=ToolRunner.run(shell,argv);
assertEquals(" cp is working ",0,ret);
assertTrue(srcFs.exists(new Path("/furi")));
// -cat a remote file.
argv=new String[2];
argv[0]="-cat";
argv[1]=dstFs.getUri().toString() + "/furi";
ret=ToolRunner.run(shell,argv);
assertEquals(" cat is working ",0,ret);
dstFs.delete(new Path("/furi"),true);
dstFs.delete(new Path("/hadoopdir"),true);
// Recursive -chgrp/-chown over remote URIs, verified via confirmOwner.
String file="/tmp/chownTest";
Path path=new Path(file);
Path parent=new Path("/tmp");
Path root=new Path("/");
TestDFSShell.writeFile(dstFs,path);
runCmd(shell,"-chgrp","-R","herbivores",dstFs.getUri().toString() + "/*");
confirmOwner(null,"herbivores",dstFs,parent,path);
runCmd(shell,"-chown","-R",":reptiles",dstFs.getUri().toString() + "/");
confirmOwner(null,"reptiles",dstFs,root,parent,path);
// Scheme-only "hdfs:///" paths must resolve via the default filesystem.
argv[0]="-cat";
argv[1]="hdfs:///furi";
ret=ToolRunner.run(shell,argv);
assertEquals(" default works for cat",0,ret);
argv[0]="-ls";
argv[1]="hdfs:///";
ret=ToolRunner.run(shell,argv);
assertEquals("default works for ls ",0,ret);
argv[0]="-rmr";
argv[1]="hdfs:///furi";
ret=ToolRunner.run(shell,argv);
assertEquals("default works for rm/rmr",0,ret);
}
finally {
if (null != srcCluster) {
srcCluster.shutdown();
}
if (null != dstCluster) {
dstCluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier ConditionMatcher
/**
 * Tests {@code -setrep}: changing the replication factor of a single file
 * leaves its sibling in a nested directory untouched, while running
 * {@code -setrep} on a directory applies recursively to the files below it.
 */
@Test(timeout=30000) public void testSetrep() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  FsShell shell = null;
  FileSystem fs = null;
  final String testdir1 =
      "/tmp/TestDFSShell-testSetrep-" + counter.getAndIncrement();
  final String testdir2 = testdir1 + "/nestedDir";
  final Path hdfsFile1 = new Path(testdir1, "testFileForSetrep");
  final Path hdfsFile2 = new Path(testdir2, "testFileForSetrep");
  // Short.valueOf replaces the deprecated Short(short) constructor and may
  // reuse cached boxed instances; equality semantics are unchanged.
  final Short oldRepFactor = Short.valueOf((short) 1);
  final Short newRepFactor = Short.valueOf((short) 3);
  try {
    String[] argv;
    cluster.waitActive();
    fs = cluster.getFileSystem();
    assertThat(fs.mkdirs(new Path(testdir2)), is(true));
    shell = new FsShell(conf);
    fs.create(hdfsFile1, true).close();
    fs.create(hdfsFile2, true).close();
    // Change the replication factor of the top-level file only; the nested
    // file must keep its original factor.
    argv = new String[]{"-setrep", newRepFactor.toString(), hdfsFile1.toString()};
    assertThat(shell.run(argv), is(SUCCESS));
    assertThat(fs.getFileStatus(hdfsFile1).getReplication(), is(newRepFactor));
    assertThat(fs.getFileStatus(hdfsFile2).getReplication(), is(oldRepFactor));
    // Change the replication factor recursively via the directory argument.
    argv = new String[]{"-setrep", newRepFactor.toString(), testdir1};
    assertThat(shell.run(argv), is(SUCCESS));
    assertThat(fs.getFileStatus(hdfsFile1).getReplication(), is(newRepFactor));
    assertThat(fs.getFileStatus(hdfsFile2).getReplication(), is(newRepFactor));
  }
  finally {
    if (shell != null) {
      shell.close();
    }
    cluster.shutdown();
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that xattr shell commands respect HDFS permissions: a non-owner
 * is denied {@code -setfattr} on a 0700 directory (exit 1, "Permission
 * denied" on stderr), the superuser succeeds, and with mode 0750 a
 * non-owner can neither read ({@code -getfattr}) nor remove
 * ({@code -setfattr -x}) the attribute.
 */
@Test(timeout=30000) public void testSetXAttrPermission() throws Exception {
  UserGroupInformation user =
      UserGroupInformation.createUserForTesting("user", new String[]{"mygroup"});
  MiniDFSCluster cluster = null;
  PrintStream bak = null;
  try {
    final Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    Path p = new Path("/foo");
    fs.mkdirs(p);
    // Redirect stderr so the "Permission denied" output can be asserted on;
    // the original stream is restored in the finally block.
    bak = System.err;
    final FsShell fshell = new FsShell(conf);
    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    System.setErr(new PrintStream(out));
    fs.setPermission(p, new FsPermission((short)0700));
    // Parameterized PrivilegedExceptionAction<Object> instead of the raw
    // type, avoiding an unchecked-conversion warning on doAs.
    user.doAs(new PrivilegedExceptionAction<Object>(){
      @Override public Object run() throws Exception {
        int ret = ToolRunner.run(fshell,
            new String[]{"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
        assertEquals("Returned should be 1", 1, ret);
        String str = out.toString();
        assertTrue("Permission denied printed",
            str.indexOf("Permission denied") != -1);
        out.reset();
        return null;
      }
    }
    );
    // The test's own (super)user may set the attribute.
    int ret = ToolRunner.run(fshell,
        new String[]{"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
    assertEquals("Returned should be 0", 0, ret);
    out.reset();
    fs.setPermission(p, new FsPermission((short)0750));
    user.doAs(new PrivilegedExceptionAction<Object>(){
      @Override public Object run() throws Exception {
        // Reading the attribute is denied...
        int ret = ToolRunner.run(fshell,
            new String[]{"-getfattr", "-n", "user.a1", "/foo"});
        assertEquals("Returned should be 1", 1, ret);
        String str = out.toString();
        assertTrue("Permission denied printed",
            str.indexOf("Permission denied") != -1);
        out.reset();
        // ...and so is removing it.
        ret = ToolRunner.run(fshell,
            new String[]{"-setfattr", "-x", "user.a1", "/foo"});
        assertEquals("Returned should be 1", 1, ret);
        str = out.toString();
        assertTrue("Permission denied printed",
            str.indexOf("Permission denied") != -1);
        out.reset();
        return null;
      }
    }
    );
  }
  finally {
    if (bak != null) {
      System.setErr(bak);
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests the {@code -count} command against several points of a test tree
 * on HDFS, against the local file system, and with both in one invocation.
 */
@Test(timeout=30000) public void testCount() throws Exception {
  Configuration configuration = new HdfsConfiguration();
  MiniDFSCluster miniCluster =
      new MiniDFSCluster.Builder(configuration).numDataNodes(2).build();
  DistributedFileSystem distFs = miniCluster.getFileSystem();
  FsShell fsShell = new FsShell();
  fsShell.setConf(configuration);
  try {
    // Expected (dirs, files) counts at various points of the created tree.
    String treeRoot = createTree(distFs, "count");
    runCount(treeRoot, 2, 4, fsShell);
    runCount(treeRoot + "2", 2, 1, fsShell);
    runCount(treeRoot + "2/f1", 0, 1, fsShell);
    runCount(treeRoot + "2/sub", 1, 0, fsShell);
    // Repeat the check against a directory on the local file system.
    final FileSystem localFs = FileSystem.getLocal(configuration);
    Path localDir = new Path(TEST_ROOT_DIR, "testcount");
    localDir = localDir.makeQualified(localFs.getUri(),
        localFs.getWorkingDirectory());
    localFs.mkdirs(localDir);
    final String localPathStr = localDir.toString();
    System.out.println("localstr=" + localPathStr);
    runCount(localPathStr, 1, 0, fsShell);
    // One invocation may mix an HDFS path and a local path.
    assertEquals(0, runCmd(fsShell, "-count", treeRoot, localPathStr));
  }
  finally {
    try {
      distFs.close();
    }
    catch (Exception ignored) {
      // best-effort close before cluster shutdown
    }
    miniCluster.shutdown();
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * Tests the {@code -f} (force/overwrite) option of the copy commands:
 * {@code -put}, {@code -copyFromLocal} and {@code -cp} must succeed with
 * {@code -f} when the destination exists, and fail without it.
 */
@Test(timeout=30000) public void testCopyCommandsWithForceOption() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  FsShell shell = null;
  FileSystem fs = null;
  final File localFile = new File(TEST_ROOT_DIR, "testFileForPut");
  final String localfilepath =
      new Path(localFile.getAbsolutePath()).toUri().toString();
  final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithForceOption-"
      + counter.getAndIncrement();
  final Path hdfsTestDir = new Path(testdir);
  try {
    fs = cluster.getFileSystem();
    fs.mkdirs(hdfsTestDir);
    localFile.createNewFile();
    // Pre-create the destination file so every copy below is an overwrite.
    writeFile(fs, new Path(testdir, "testFileForPut"));
    shell = new FsShell();
    // Same overwrite contract for all three copy commands.
    for (String cmd : new String[]{"-put", "-copyFromLocal", "-cp"}) {
      final String name = cmd.substring(1);
      int res = ToolRunner.run(shell,
          new String[]{cmd, "-f", localfilepath, testdir});
      assertEquals(name + " -f is not working", SUCCESS, res);
      res = ToolRunner.run(shell, new String[]{cmd, localfilepath, testdir});
      assertEquals(name + " command itself is able to overwrite the file",
          ERROR, res);
    }
  }
  finally {
    if (null != shell) {
      shell.close();
    }
    if (localFile.exists()) {
      localFile.delete();
    }
    if (null != fs) {
      fs.delete(hdfsTestDir, true);
      fs.close();
    }
    cluster.shutdown();
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that a RemoteException caused by insufficient permissions is
 * reported by the shell as a clean error: {@code -ls} on a 0700 directory
 * as a different user exits with 1 and prints "Permission denied" rather
 * than a raw exception trace.
 */
@Test(timeout=30000) public void testRemoteException() throws Exception {
  UserGroupInformation tmpUGI =
      UserGroupInformation.createUserForTesting("tmpname", new String[]{"mygroup"});
  MiniDFSCluster dfs = null;
  PrintStream bak = null;
  try {
    final Configuration conf = new HdfsConfiguration();
    dfs = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    FileSystem fs = dfs.getFileSystem();
    Path p = new Path("/foo");
    fs.mkdirs(p);
    fs.setPermission(p, new FsPermission((short)0700));
    // Save stderr; it is redirected inside the doAs block and restored in
    // the finally block.
    bak = System.err;
    // Parameterized PrivilegedExceptionAction<Object> instead of the raw
    // type, avoiding an unchecked-conversion warning on doAs.
    tmpUGI.doAs(new PrivilegedExceptionAction<Object>(){
      @Override public Object run() throws Exception {
        FsShell fshell = new FsShell(conf);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        PrintStream tmp = new PrintStream(out);
        System.setErr(tmp);
        String[] args = new String[2];
        args[0] = "-ls";
        args[1] = "/foo";
        int ret = ToolRunner.run(fshell, args);
        assertEquals("returned should be 1", 1, ret);
        String str = out.toString();
        assertTrue("permission denied printed",
            str.indexOf("Permission denied") != -1);
        out.reset();
        return null;
      }
    }
    );
  }
  finally {
    if (bak != null) {
      System.setErr(bak);
    }
    if (dfs != null) {
      dfs.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Checks shell command error output and exit statuses: commands on
 * nonexistent paths must exit with status 1 and print a reasonable,
 * unix-like message on stderr instead of a raw exception trace.
 */
@Test(timeout=30000) public void testErrOutPut() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
PrintStream bak=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem srcFs=cluster.getFileSystem();
Path root=new Path("/nonexistentfile");
// Capture stderr so each command's message can be inspected; the original
// stream is restored in the finally block.
bak=System.err;
ByteArrayOutputStream out=new ByteArrayOutputStream();
PrintStream tmp=new PrintStream(out);
System.setErr(tmp);
String[] argv=new String[2];
// -cat on a missing file: exit 1 and no exception trace in the output.
argv[0]="-cat";
argv[1]=root.toUri().getPath();
int ret=ToolRunner.run(new FsShell(),argv);
assertEquals(" -cat returned 1 ",1,ret);
String returned=out.toString();
assertTrue("cat does not print exceptions ",(returned.lastIndexOf("Exception") == -1));
out.reset();
// -rm / -rmr on a missing file: exit 1 with "No such file or directory".
argv[0]="-rm";
argv[1]=root.toString();
FsShell shell=new FsShell();
shell.setConf(conf);
ret=ToolRunner.run(shell,argv);
assertEquals(" -rm returned 1 ",1,ret);
returned=out.toString();
out.reset();
assertTrue("rm prints reasonable error ",(returned.lastIndexOf("No such file or directory") != -1));
argv[0]="-rmr";
argv[1]=root.toString();
ret=ToolRunner.run(shell,argv);
assertEquals(" -rmr returned 1",1,ret);
returned=out.toString();
assertTrue("rmr prints reasonable error ",(returned.lastIndexOf("No such file or directory") != -1));
out.reset();
// -du / -dus on a missing file: only the message is checked here, not the
// exit code.
argv[0]="-du";
argv[1]="/nonexistentfile";
ret=ToolRunner.run(shell,argv);
returned=out.toString();
assertTrue(" -du prints reasonable error ",(returned.lastIndexOf("No such file or directory") != -1));
out.reset();
argv[0]="-dus";
argv[1]="/nonexistentfile";
ret=ToolRunner.run(shell,argv);
returned=out.toString();
assertTrue(" -dus prints reasonable error",(returned.lastIndexOf("No such file or directory") != -1));
out.reset();
// -ls on a missing file must not claim "Found 0 items".
argv[0]="-ls";
argv[1]="/nonexistenfile";
ret=ToolRunner.run(shell,argv);
returned=out.toString();
assertTrue(" -ls does not return Found 0 items",(returned.lastIndexOf("Found 0") == -1));
out.reset();
argv[0]="-ls";
argv[1]="/nonexistentfile";
ret=ToolRunner.run(shell,argv);
assertEquals(" -lsr should fail ",1,ret);
out.reset();
// -ls on an existing empty directory prints nothing (again no "Found 0").
srcFs.mkdirs(new Path("/testdir"));
argv[0]="-ls";
argv[1]="/testdir";
ret=ToolRunner.run(shell,argv);
returned=out.toString();
assertTrue(" -ls does not print out anything ",(returned.lastIndexOf("Found 0") == -1));
out.reset();
// -ls on a glob that matches nothing fails with exit 1.
argv[0]="-ls";
argv[1]="/user/nonxistant/*";
ret=ToolRunner.run(shell,argv);
assertEquals(" -ls on nonexistent glob returns 1",1,ret);
out.reset();
// -mkdir over an existing directory fails with "File exists".
argv[0]="-mkdir";
argv[1]="/testdir";
ret=ToolRunner.run(shell,argv);
returned=out.toString();
assertEquals(" -mkdir returned 1 ",1,ret);
assertTrue(" -mkdir returned File exists",(returned.lastIndexOf("File exists") != -1));
// -mkdir over an existing file fails with "not a directory".
Path testFile=new Path("/testfile");
OutputStream outtmp=srcFs.create(testFile);
outtmp.write(testFile.toString().getBytes());
outtmp.close();
out.reset();
argv[0]="-mkdir";
argv[1]="/testfile";
ret=ToolRunner.run(shell,argv);
returned=out.toString();
assertEquals(" -mkdir returned 1",1,ret);
assertTrue(" -mkdir returned this is a file ",(returned.lastIndexOf("not a directory") != -1));
out.reset();
// -mv to the relative destination "file" fails with exit 1.
argv=new String[3];
argv[0]="-mv";
argv[1]="/testfile";
argv[2]="file";
ret=ToolRunner.run(shell,argv);
assertEquals("mv failed to rename",1,ret);
out.reset();
// A successful -mv is silent: no "Renamed" output.
argv=new String[3];
argv[0]="-mv";
argv[1]="/testfile";
argv[2]="/testfiletest";
ret=ToolRunner.run(shell,argv);
returned=out.toString();
assertTrue("no output from rename",(returned.lastIndexOf("Renamed") == -1));
out.reset();
// Moving the now-renamed (hence missing) source prints a unix-like error.
argv[0]="-mv";
argv[1]="/testfile";
argv[2]="/testfiletmp";
ret=ToolRunner.run(shell,argv);
returned=out.toString();
assertTrue(" unix like output",(returned.lastIndexOf("No such file or") != -1));
out.reset();
// -du with no path argument succeeds (defaults to the home directory,
// created just before) and does not complain about an empty path.
argv=new String[1];
argv[0]="-du";
srcFs.mkdirs(srcFs.getHomeDirectory());
ret=ToolRunner.run(shell,argv);
returned=out.toString();
assertEquals(" no error ",0,ret);
assertTrue("empty path specified",(returned.lastIndexOf("empty string") == -1));
out.reset();
// -test -d on a missing directory: exit 1 and no output at all.
argv=new String[3];
argv[0]="-test";
argv[1]="-d";
argv[2]="/no/such/dir";
ret=ToolRunner.run(shell,argv);
returned=out.toString();
assertEquals(" -test -d wrong result ",1,ret);
assertTrue(returned.isEmpty());
}
finally {
if (bak != null) {
System.setErr(bak);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests -copyToLocal: copying the glob "&lt;root&gt;*" from HDFS into the local
 * test directory reproduces the tree (files f1/f2, directory sub with
 * f3/f4, and a second root), and copying a nonexistent source fails with
 * exit code 1 without creating a local file.
 */
@Test(timeout=30000) public void testCopyToLocal() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs=cluster.getFileSystem();
assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
DistributedFileSystem dfs=(DistributedFileSystem)fs;
FsShell shell=new FsShell();
shell.setConf(conf);
try {
String root=createTree(dfs,"copyToLocal");
{
// Copy everything matching the glob and verify the files and the "sub"
// directory arrived locally.
try {
assertEquals(0,runCmd(shell,"-copyToLocal",root + "*",TEST_ROOT_DIR));
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
File localroot=new File(TEST_ROOT_DIR,"copyToLocal");
File localroot2=new File(TEST_ROOT_DIR,"copyToLocal2");
File f1=new File(localroot,"f1");
assertTrue("Copying failed.",f1.isFile());
File f2=new File(localroot,"f2");
assertTrue("Copying failed.",f2.isFile());
File sub=new File(localroot,"sub");
assertTrue("Copying failed.",sub.isDirectory());
File f3=new File(sub,"f3");
assertTrue("Copying failed.",f3.isFile());
File f4=new File(sub,"f4");
assertTrue("Copying failed.",f4.isFile());
File f5=new File(localroot2,"f1");
assertTrue("Copying failed.",f5.isFile());
// Remove the copied files so the local test directory is clean.
f1.delete();
f2.delete();
f3.delete();
f4.delete();
f5.delete();
sub.delete();
}
{
// Copying a nonexistent source must fail and leave nothing behind.
String[] args={"-copyToLocal","nosuchfile",TEST_ROOT_DIR};
try {
assertEquals(1,shell.run(args));
}
catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
File f6=new File(TEST_ROOT_DIR,"nosuchfile");
assertTrue(!f6.exists());
}
}
finally {
try {
dfs.close();
}
catch ( Exception e) {
}
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests the preserve flags of -cp when the source is a directory carrying
 * times, owner/group, permission (with sticky bit), xattrs and ACL entries:
 * -p and -ptop preserve times/owner/permission but not xattrs or ACLs,
 * -ptopx additionally preserves xattrs, -ptopa additionally preserves ACLs,
 * and -ptoa preserves ACLs (permission bits still match the source).
 */
@Test(timeout=120000) public void testCopyCommandsToDirectoryWithPreserveOption() throws Exception {
Configuration conf=new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY,true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
FsShell shell=null;
FileSystem fs=null;
final String testdir="/tmp/TestDFSShell-testCopyCommandsToDirectoryWithPreserveOption-" + counter.getAndIncrement();
final Path hdfsTestDir=new Path(testdir);
try {
fs=cluster.getFileSystem();
fs.mkdirs(hdfsTestDir);
// Source directory with ACL entries, sticky-bit permission, one child
// file, and two xattrs; record its attributes for later comparison.
Path srcDir=new Path(hdfsTestDir,"srcDir");
fs.mkdirs(srcDir);
fs.setAcl(srcDir,Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(DEFAULT,GROUP,"bar",READ_EXECUTE),aclEntry(ACCESS,OTHER,EXECUTE)));
fs.setPermission(srcDir,new FsPermission(ALL,READ_EXECUTE,EXECUTE,true));
Path srcFile=new Path(srcDir,"srcFile");
fs.create(srcFile).close();
FileStatus status=fs.getFileStatus(srcDir);
final long mtime=status.getModificationTime();
final long atime=status.getAccessTime();
final String owner=status.getOwner();
final String group=status.getGroup();
final FsPermission perm=status.getPermission();
fs.setXAttr(srcDir,USER_A1,USER_A1_VALUE);
fs.setXAttr(srcDir,TRUSTED_A1,TRUSTED_A1_VALUE);
shell=new FsShell(conf);
// -p: times/owner/permission preserved; xattrs and ACLs are not.
Path targetDir1=new Path(hdfsTestDir,"targetDir1");
String[] argv=new String[]{"-cp","-p",srcDir.toUri().toString(),targetDir1.toUri().toString()};
int ret=ToolRunner.run(shell,argv);
assertEquals("cp -p is not working",SUCCESS,ret);
FileStatus targetStatus=fs.getFileStatus(targetDir1);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
FsPermission targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
Map xattrs=fs.getXAttrs(targetDir1);
assertTrue(xattrs.isEmpty());
List acls=fs.getAclStatus(targetDir1).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptop: the same preservation set spelled out explicitly.
Path targetDir2=new Path(hdfsTestDir,"targetDir2");
argv=new String[]{"-cp","-ptop",srcDir.toUri().toString(),targetDir2.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptop is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(targetDir2);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs=fs.getXAttrs(targetDir2);
assertTrue(xattrs.isEmpty());
acls=fs.getAclStatus(targetDir2).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptopx: xattrs are preserved in addition; ACLs still are not.
Path targetDir3=new Path(hdfsTestDir,"targetDir3");
argv=new String[]{"-cp","-ptopx",srcDir.toUri().toString(),targetDir3.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptopx is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(targetDir3);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs=fs.getXAttrs(targetDir3);
assertEquals(xattrs.size(),2);
assertArrayEquals(USER_A1_VALUE,xattrs.get(USER_A1));
assertArrayEquals(TRUSTED_A1_VALUE,xattrs.get(TRUSTED_A1));
acls=fs.getAclStatus(targetDir3).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptopa: ACL entries (and the ACL bit) are preserved; xattrs are not.
Path targetDir4=new Path(hdfsTestDir,"targetDir4");
argv=new String[]{"-cp","-ptopa",srcDir.toUri().toString(),targetDir4.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptopa is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(targetDir4);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs=fs.getXAttrs(targetDir4);
assertTrue(xattrs.isEmpty());
acls=fs.getAclStatus(targetDir4).getEntries();
assertFalse(acls.isEmpty());
assertTrue(targetPerm.getAclBit());
assertEquals(fs.getAclStatus(srcDir),fs.getAclStatus(targetDir4));
// -ptoa: preserving ACLs without 'p' still yields matching permission
// bits on the target (asserted below).
Path targetDir5=new Path(hdfsTestDir,"targetDir5");
argv=new String[]{"-cp","-ptoa",srcDir.toUri().toString(),targetDir5.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptoa is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(targetDir5);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs=fs.getXAttrs(targetDir5);
assertTrue(xattrs.isEmpty());
acls=fs.getAclStatus(targetDir5).getEntries();
assertFalse(acls.isEmpty());
assertTrue(targetPerm.getAclBit());
assertEquals(fs.getAclStatus(srcDir),fs.getAclStatus(targetDir5));
}
finally {
if (shell != null) {
shell.close();
}
if (fs != null) {
fs.delete(hdfsTestDir,true);
fs.close();
}
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests that -cp preserves ACL entries and the sticky bit of a source file:
 * -cp -p keeps times/owner/permission but drops the ACL entries, while
 * -cp -ptopa also carries the full ACL status (including the ACL bit).
 */
@Test(timeout=120000) public void testCopyCommandsPreserveAclAndStickyBit() throws Exception {
Configuration conf=new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
FsShell shell=null;
FileSystem fs=null;
final String testdir="/tmp/TestDFSShell-testCopyCommandsPreserveAclAndStickyBit-" + counter.getAndIncrement();
final Path hdfsTestDir=new Path(testdir);
try {
fs=cluster.getFileSystem();
fs.mkdirs(hdfsTestDir);
// Source file with ACL entries and a permission whose last FsPermission
// constructor argument sets the sticky bit.
Path src=new Path(hdfsTestDir,"srcfile");
fs.create(src).close();
fs.setAcl(src,Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,GROUP,"bar",READ_EXECUTE),aclEntry(ACCESS,OTHER,EXECUTE)));
fs.setPermission(src,new FsPermission(ALL,READ_EXECUTE,EXECUTE,true));
// Record the source attributes to compare against the copies.
FileStatus status=fs.getFileStatus(src);
final long mtime=status.getModificationTime();
final long atime=status.getAccessTime();
final String owner=status.getOwner();
final String group=status.getGroup();
final FsPermission perm=status.getPermission();
shell=new FsShell(conf);
// -p: times/owner/permission preserved, ACL entries dropped.
Path target1=new Path(hdfsTestDir,"targetfile1");
String[] argv=new String[]{"-cp","-p",src.toUri().toString(),target1.toUri().toString()};
int ret=ToolRunner.run(shell,argv);
assertEquals("cp is not working",SUCCESS,ret);
FileStatus targetStatus=fs.getFileStatus(target1);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
FsPermission targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
List acls=fs.getAclStatus(target1).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptopa: ACL entries and the ACL bit are preserved as well.
Path target2=new Path(hdfsTestDir,"targetfile2");
argv=new String[]{"-cp","-ptopa",src.toUri().toString(),target2.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptopa is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(target2);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
acls=fs.getAclStatus(target2).getEntries();
assertFalse(acls.isEmpty());
assertTrue(targetPerm.getAclBit());
assertEquals(fs.getAclStatus(src),fs.getAclStatus(target2));
}
finally {
if (null != shell) {
shell.close();
}
if (null != fs) {
fs.delete(hdfsTestDir,true);
fs.close();
}
cluster.shutdown();
}
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests -getfattr error cases: a user without read access on the path must
 * not see the attribute value in the output, and requesting a nonexistent
 * attribute prints "At least one of the attributes provided was not found".
 */
@Test(timeout=120000) public void testGetFAttrErrors() throws Exception {
  final UserGroupInformation user =
      UserGroupInformation.createUserForTesting("user", new String[]{"mygroup"});
  MiniDFSCluster cluster = null;
  PrintStream bakErr = null;
  try {
    final Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final FileSystem fs = cluster.getFileSystem();
    final Path p = new Path("/foo");
    fs.mkdirs(p);
    // Redirect stderr so command output can be inspected; restored in the
    // finally block.
    bakErr = System.err;
    final FsShell fshell = new FsShell(conf);
    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    System.setErr(new PrintStream(out));
    fs.setPermission(p, new FsPermission((short)0700));
    {
      // Set the attribute as the test's own user; this must succeed.
      final int ret = ToolRunner.run(fshell,
          new String[]{"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
      assertEquals("Returned should be 0", 0, ret);
      out.reset();
    }
    // Parameterized PrivilegedExceptionAction<Object> instead of the raw
    // type, avoiding an unchecked-conversion warning on doAs.
    user.doAs(new PrivilegedExceptionAction<Object>(){
      @Override public Object run() throws Exception {
        // As a user without access, the value must not leak into output.
        ToolRunner.run(fshell,
            new String[]{"-getfattr", "-n", "user.a1", "/foo"});
        String str = out.toString();
        assertTrue("xattr value was incorrectly returned",
            str.indexOf("1234") == -1);
        out.reset();
        return null;
      }
    }
    );
    {
      // Requesting a nonexistent attribute reports a not-found error.
      // (The exit code is deliberately not asserted here.)
      ToolRunner.run(fshell,
          new String[]{"-getfattr", "-n", "user.nonexistent", "/foo"});
      String str = out.toString();
      assertTrue("xattr value was incorrectly returned",
          str.indexOf(
              "getfattr: At least one of the attributes provided was not found")
              >= 0);
      out.reset();
    }
  }
  finally {
    if (bakErr != null) {
      System.setErr(bakErr);
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests the preserve flags of -cp on a single source file carrying times,
 * owner/group, permission, xattrs and ACL entries: -p and -ptop preserve
 * times/owner/permission but not xattrs or ACLs, -ptopx additionally
 * preserves xattrs, -ptopa additionally preserves ACLs, and -ptoa
 * preserves ACLs (permission bits still match the source).
 */
@Test(timeout=120000) public void testCopyCommandsWithPreserveOption() throws Exception {
Configuration conf=new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY,true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
FsShell shell=null;
FileSystem fs=null;
final String testdir="/tmp/TestDFSShell-testCopyCommandsWithPreserveOption-" + counter.getAndIncrement();
final Path hdfsTestDir=new Path(testdir);
try {
fs=cluster.getFileSystem();
fs.mkdirs(hdfsTestDir);
// Source file with ACL entries and two xattrs; record its attributes for
// comparison against each copy.
Path src=new Path(hdfsTestDir,"srcfile");
fs.create(src).close();
fs.setAcl(src,Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,GROUP,"bar",READ_EXECUTE),aclEntry(ACCESS,OTHER,EXECUTE)));
FileStatus status=fs.getFileStatus(src);
final long mtime=status.getModificationTime();
final long atime=status.getAccessTime();
final String owner=status.getOwner();
final String group=status.getGroup();
final FsPermission perm=status.getPermission();
fs.setXAttr(src,USER_A1,USER_A1_VALUE);
fs.setXAttr(src,TRUSTED_A1,TRUSTED_A1_VALUE);
shell=new FsShell(conf);
// -p: times/owner/permission preserved; xattrs and ACLs are not.
Path target1=new Path(hdfsTestDir,"targetfile1");
String[] argv=new String[]{"-cp","-p",src.toUri().toString(),target1.toUri().toString()};
int ret=ToolRunner.run(shell,argv);
assertEquals("cp -p is not working",SUCCESS,ret);
FileStatus targetStatus=fs.getFileStatus(target1);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
FsPermission targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
Map xattrs=fs.getXAttrs(target1);
assertTrue(xattrs.isEmpty());
List acls=fs.getAclStatus(target1).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptop: the same preservation set spelled out explicitly.
Path target2=new Path(hdfsTestDir,"targetfile2");
argv=new String[]{"-cp","-ptop",src.toUri().toString(),target2.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptop is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(target2);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs=fs.getXAttrs(target2);
assertTrue(xattrs.isEmpty());
acls=fs.getAclStatus(target2).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptopx: xattrs preserved in addition; ACLs still are not.
Path target3=new Path(hdfsTestDir,"targetfile3");
argv=new String[]{"-cp","-ptopx",src.toUri().toString(),target3.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptopx is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(target3);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs=fs.getXAttrs(target3);
assertEquals(xattrs.size(),2);
assertArrayEquals(USER_A1_VALUE,xattrs.get(USER_A1));
assertArrayEquals(TRUSTED_A1_VALUE,xattrs.get(TRUSTED_A1));
acls=fs.getAclStatus(target3).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptopa: ACL entries (and the ACL bit) preserved; xattrs are not.
Path target4=new Path(hdfsTestDir,"targetfile4");
argv=new String[]{"-cp","-ptopa",src.toUri().toString(),target4.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptopa is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(target4);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs=fs.getXAttrs(target4);
assertTrue(xattrs.isEmpty());
acls=fs.getAclStatus(target4).getEntries();
assertFalse(acls.isEmpty());
assertTrue(targetPerm.getAclBit());
assertEquals(fs.getAclStatus(src),fs.getAclStatus(target4));
// -ptoa: preserving ACLs without 'p' still yields matching permission
// bits on the target (asserted below).
Path target5=new Path(hdfsTestDir,"targetfile5");
argv=new String[]{"-cp","-ptoa",src.toUri().toString(),target5.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptoa is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(target5);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs=fs.getXAttrs(target5);
assertTrue(xattrs.isEmpty());
acls=fs.getAclStatus(target5).getEntries();
assertFalse(acls.isEmpty());
assertTrue(targetPerm.getAclBit());
assertEquals(fs.getAclStatus(src),fs.getAclStatus(target5));
}
finally {
if (null != shell) {
shell.close();
}
if (null != fs) {
fs.delete(hdfsTestDir,true);
fs.close();
}
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests recursive listing (-lsr): from the tree root it succeeds for the
 * owner, and when run as a user who has no permission on the "sub"
 * directory it exits non-zero while still listing the accessible entries
 * (the "zzz" directory must appear in the captured output).
 */
@Test(timeout=30000) public void testLsr() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  DistributedFileSystem dfs = cluster.getFileSystem();
  try {
    final String root = createTree(dfs, "lsr");
    dfs.mkdirs(new Path(root, "zzz"));
    runLsr(new FsShell(conf), root, 0);
    // Make "sub" unreadable so the restricted user's listing partly fails.
    final Path sub = new Path(root, "sub");
    dfs.setPermission(sub, new FsPermission((short)0));
    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    final String tmpusername = ugi.getShortUserName() + "1";
    UserGroupInformation tmpUGI = UserGroupInformation.createUserForTesting(
        tmpusername, new String[]{tmpusername});
    // PrivilegedExceptionAction is parameterized with String so doAs
    // returns the listing without a raw-type unchecked conversion.
    String results = tmpUGI.doAs(new PrivilegedExceptionAction<String>(){
      @Override public String run() throws Exception {
        return runLsr(new FsShell(conf), root, 1);
      }
    }
    );
    assertTrue(results.contains("zzz"));
  }
  finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This test ensures the appropriate response (successful or failure) from
 * a Datanode when the system is started with differing version combinations.
 *
 * For each 3-tuple in the cross product
 * ({oldLayoutVersion,currentLayoutVersion,futureLayoutVersion},
 * {currentNamespaceId,incorrectNamespaceId},
 * {pastFsscTime,currentFsscTime,futureFsscTime})
 * 1. Startup Namenode with version file containing
 * (currentLayoutVersion,currentNamespaceId,currentFsscTime)
 * 2. Attempt to startup Datanode with version file containing
 * this iterations version 3-tuple
 *
 */
@Test(timeout=300000) public void testVersions() throws Exception {
UpgradeUtilities.initialize();
Configuration conf=UpgradeUtilities.initializeStorageStateConf(1,new HdfsConfiguration());
// Each StorageData entry is one version 3-tuple to try for the DataNode.
StorageData[] versions=initializeVersions();
UpgradeUtilities.createNameNodeStorageDirs(conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY),"current");
// Start the NameNode alone (no DataNodes, no format) on current-version
// storage; DataNodes are started one version tuple at a time below.
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).startupOption(StartupOption.REGULAR).build();
StorageData nameNodeVersion=new StorageData(HdfsConstants.NAMENODE_LAYOUT_VERSION,UpgradeUtilities.getCurrentNamespaceID(cluster),UpgradeUtilities.getCurrentClusterID(cluster),UpgradeUtilities.getCurrentFsscTime(cluster),UpgradeUtilities.getCurrentBlockPoolID(cluster));
log("NameNode version info",NAME_NODE,null,nameNodeVersion);
String bpid=UpgradeUtilities.getCurrentBlockPoolID(cluster);
for (int i=0; i < versions.length; i++) {
// Fresh DataNode storage stamped with this iteration's version tuple.
File[] storage=UpgradeUtilities.createDataNodeStorageDirs(conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY),"current");
log("DataNode version info",DATA_NODE,i,versions[i]);
UpgradeUtilities.createDataNodeVersionFile(storage,versions[i].storageInfo,bpid,versions[i].blockPoolId);
try {
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
}
catch ( Exception ignore) {
// Startup failure is an expected outcome for incompatible versions;
// the compatibility check below is what decides pass/fail.
}
assertTrue(cluster.getNameNode() != null);
// The DataNode must be up if and only if its version tuple is
// compatible with the NameNode's.
assertEquals(isVersionCompatible(nameNodeVersion,versions[i]),cluster.isDataNodeUp());
cluster.shutdownDataNodes();
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier
/**
 * This test iterates over the testCases table for Datanode storage and
 * attempts to startup the DataNode normally, checking either that
 * recovery produced the expected current/previous directories or that
 * the DataNode refused to come up.
 */
@Test public void testDNStorageStates() throws Exception {
String[] baseDirs;
// Run the whole table once with a single storage dir and once with two.
for (int numDirs=1; numDirs <= 2; numDirs++) {
conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,-1);
conf=UpgradeUtilities.initializeStorageStateConf(numDirs,conf);
for (int i=0; i < NUM_DN_TEST_CASES; i++) {
// Each test case is a vector of boolean flags describing which storage
// directories exist beforehand and what the expected outcome is.
boolean[] testCase=testCases[i];
boolean shouldRecover=testCase[SHOULD_RECOVER];
boolean curAfterRecover=testCase[CURRENT_SHOULD_EXIST_AFTER_RECOVER];
boolean prevAfterRecover=testCase[PREVIOUS_SHOULD_EXIST_AFTER_RECOVER];
log("DATA_NODE recovery",numDirs,i,testCase);
createNameNodeStorageState(new boolean[]{true,true,false,false,false});
cluster=createCluster(conf);
baseDirs=createDataNodeStorageState(testCase);
if (!testCase[CURRENT_EXISTS] && !testCase[PREVIOUS_EXISTS] && !testCase[PREVIOUS_TMP_EXISTS]&& !testCase[REMOVED_TMP_EXISTS]) {
// No pre-existing storage at all: the DataNode is simply started;
// no recovery result applies, so there is nothing to check.
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
}
 else {
if (shouldRecover) {
// Recoverable state: the DataNode starts and the storage layout
// afterwards must match the expected current/previous flags.
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
checkResultDataNode(baseDirs,curAfterRecover,prevAfterRecover);
}
 else {
// Unrecoverable state: the DataNode process is started but must
// not report itself as up.
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
assertFalse(cluster.getDataNodes().get(0).isDatanodeUp());
}
}
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier
/**
 * This test iterates over the testCases table for block pool storage and
 * attempts to startup the DataNode normally.
 */
@Test public void testBlockPoolStorageStates() throws Exception {
  String[] baseDirs;
  String bpid = UpgradeUtilities.getCurrentBlockPoolID(null);
  // Exercise both single- and dual-storage-directory configurations.
  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    conf = new HdfsConfiguration();
    // Disable the periodic block scanner; use the config-key constant for
    // consistency with testDNStorageStates instead of the raw string.
    conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    for (int i = 0; i < NUM_DN_TEST_CASES; i++) {
      boolean[] testCase = testCases[i];
      boolean shouldRecover = testCase[SHOULD_RECOVER];
      boolean curAfterRecover = testCase[CURRENT_SHOULD_EXIST_AFTER_RECOVER];
      boolean prevAfterRecover = testCase[PREVIOUS_SHOULD_EXIST_AFTER_RECOVER];
      log("BLOCK_POOL recovery", numDirs, i, testCase);
      // NameNode storage is pinned to a healthy state; only the block
      // pool storage layout varies per test case.
      createNameNodeStorageState(new boolean[]{true, true, false, false, false});
      cluster = createCluster(conf);
      baseDirs = createBlockPoolStorageState(bpid, testCase);
      if (!testCase[CURRENT_EXISTS] && !testCase[PREVIOUS_EXISTS]
          && !testCase[PREVIOUS_TMP_EXISTS] && !testCase[REMOVED_TMP_EXISTS]) {
        // Entirely empty storage: the DataNode formats it and starts.
        cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      } else {
        if (shouldRecover) {
          cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
          checkResultBlockPool(baseDirs, curAfterRecover, prevAfterRecover);
        } else {
          // Unrecoverable state: startup is attempted but the block pool
          // service must not come up.
          cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
          assertFalse(cluster.getDataNodes().get(0).isBPServiceAlive(bpid));
        }
      }
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that DFSUtil.getPassword reads the HTTPS server passwords from
 * a JavaKeyStore credential provider, and returns null for unknown aliases.
 */
@Test public void testGetPassword() throws Exception {
  File testDir = new File(System.getProperty("test.build.data", "target/test-dir"));
  Configuration conf = new Configuration();
  final String ourUrl =
      JavaKeyStoreProvider.SCHEME_NAME + "://file/" + testDir + "/test.jks";
  // Start from a clean keystore file.
  File file = new File(testDir, "test.jks");
  file.delete();
  conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
  CredentialProvider provider = CredentialProviderFactory.getProviders(conf).get(0);
  char[] keypass = {'k', 'e', 'y', 'p', 'a', 's', 's'};
  char[] storepass = {'s', 't', 'o', 'r', 'e', 'p', 'a', 's', 's'};
  char[] trustpass = {'t', 'r', 'u', 's', 't', 'p', 'a', 's', 's'};
  // The fresh keystore must not contain any of the aliases yet.
  assertNull(provider.getCredentialEntry(DFS_SERVER_HTTPS_KEYPASSWORD_KEY));
  assertNull(provider.getCredentialEntry(DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY));
  assertNull(provider.getCredentialEntry(DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY));
  try {
    provider.createCredentialEntry(DFS_SERVER_HTTPS_KEYPASSWORD_KEY, keypass);
    provider.createCredentialEntry(DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY, storepass);
    provider.createCredentialEntry(DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY, trustpass);
    provider.flush();
  } catch (Exception e) {
    e.printStackTrace();
    throw e;
  }
  // The credentials must round-trip through the provider...
  assertArrayEquals(keypass,
      provider.getCredentialEntry(DFS_SERVER_HTTPS_KEYPASSWORD_KEY).getCredential());
  assertArrayEquals(storepass,
      provider.getCredentialEntry(DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY).getCredential());
  assertArrayEquals(trustpass,
      provider.getCredentialEntry(DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY).getCredential());
  // ...and be visible through DFSUtil.getPassword.
  Assert.assertEquals("keypass", DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_KEYPASSWORD_KEY));
  Assert.assertEquals("storepass", DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY));
  Assert.assertEquals("trustpass", DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY));
  // An unknown alias must yield null rather than throwing.
  Assert.assertNull(DFSUtil.getPassword(conf, "invalid-alias"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies DFSUtil.getNameServiceUris for mixed HA/federated/default
 * configurations, including the case where fs.defaultFS is not an HDFS URI.
 */
@Test public void testGetNNUris() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  final String NS1_NN1_ADDR = "ns1-nn1.example.com:8020";
  final String NS1_NN2_ADDR = "ns1-nn2.example.com:8020";
  final String NS2_NN_ADDR = "ns2-nn.example.com:8020";
  final String NN1_ADDR = "nn.example.com:8020";
  final String NN1_SRVC_ADDR = "nn.example.com:8021";
  final String NN2_ADDR = "nn2.example.com:8020";
  // ns1 is HA (logical URI), ns2 has a single service RPC address.
  conf.set(DFS_NAMESERVICES, "ns1,ns2");
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"), "nn1,nn2");
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_ADDR);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_ADDR);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns2"), NS2_NN_ADDR);
  conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "hdfs://" + NN1_ADDR);
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN2_ADDR);
  Collection<URI> uris = DFSUtil.getNameServiceUris(conf,
      DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
  assertEquals(4, uris.size());
  assertTrue(uris.contains(new URI("hdfs://ns1")));
  assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
  assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
  assertTrue(uris.contains(new URI("hdfs://" + NN2_ADDR)));
  // A non-HDFS default filesystem must not contribute a URI.
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "viewfs://vfs-name.example.com");
  uris = DFSUtil.getNameServiceUris(conf,
      DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
  assertEquals(3, uris.size());
  assertTrue(uris.contains(new URI("hdfs://ns1")));
  assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
  assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
  // A default filesystem that duplicates a nameservice must not be
  // counted twice.
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
  uris = DFSUtil.getNameServiceUris(conf,
      DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
  assertEquals(3, uris.size());
  assertTrue(uris.contains(new URI("hdfs://ns1")));
  assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
  assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
  // Non-federated setup: the service RPC address wins over the client
  // RPC address and the default filesystem.
  conf = new HdfsConfiguration();
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN1_ADDR);
  conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, NN1_ADDR);
  conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NN1_SRVC_ADDR);
  uris = DFSUtil.getNameServiceUris(conf,
      DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
  assertEquals(1, uris.size());
  assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));
}
APIUtilityVerifier EqualityVerifier
/**
 * Checks that DFSUtil.getInfoServer falls back to the wildcard address
 * with the scheme's default port, and derives the HTTP URI from an
 * explicit socket address.
 */
@Test public void testGetInfoServer() throws IOException, URISyntaxException {
  HdfsConfiguration config = new HdfsConfiguration();

  // No address given: expect 0.0.0.0 on the scheme's default port.
  URI expectedHttps = new URI("https", null, "0.0.0.0",
      DFS_NAMENODE_HTTPS_PORT_DEFAULT, null, null, null);
  assertEquals(expectedHttps, DFSUtil.getInfoServer(null, config, "https"));

  URI expectedHttp = new URI("http", null, "0.0.0.0",
      DFS_NAMENODE_HTTP_PORT_DEFAULT, null, null, null);
  assertEquals(expectedHttp, DFSUtil.getInfoServer(null, config, "http"));

  // With an RPC address: expect its host with the default HTTP port.
  URI fromRpcAddr = DFSUtil.getInfoServer(
      new InetSocketAddress("localhost", 8020), config, "http");
  assertEquals(
      URI.create("http://localhost:" + DFS_NAMENODE_HTTP_PORT_DEFAULT),
      fromRpcAddr);
}
APIUtilityVerifier EqualityVerifier
/**
 * Verifies that the per-namenode WebHDFS addresses of an HA nameservice
 * are resolved from the configuration.
 */
@Test public void testGetHaNnHttpAddresses() throws IOException {
  final String LOGICAL_HOST_NAME = "ns1";
  final String NS1_NN1_ADDR = "ns1-nn1.example.com:8020";
  final String NS1_NN2_ADDR = "ns1-nn2.example.com:8020";
  Configuration conf =
      createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
  // nameservice -> (namenode id -> address); element type assumed
  // InetSocketAddress per DFSUtil.getHaNnWebHdfsAddresses — confirm.
  Map<String, Map<String, InetSocketAddress>> map =
      DFSUtil.getHaNnWebHdfsAddresses(conf, "webhdfs");
  assertEquals(NS1_NN1_ADDR, map.get("ns1").get("nn1").toString());
  assertEquals(NS1_NN2_ADDR, map.get("ns1").get("nn2").toString());
}
APIUtilityVerifier UtilityVerifier
/**
 * Tests that for an empty configuration an IOException is thrown from
 * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)},
 * {@link DFSUtil#getBackupNodeAddresses(Configuration)} and
 * {@link DFSUtil#getSecondaryNameNodeAddresses(Configuration)}.
 */
@Test public void testEmptyConf(){
  // 'false' skips loading default resources, so no addresses exist at all.
  HdfsConfiguration conf = new HdfsConfiguration(false);
  try {
    Map<String, Map<String, InetSocketAddress>> map =
        DFSUtil.getNNServiceRpcAddresses(conf);
    fail("Expected IOException is not thrown, result was: "
        + DFSUtil.addressMapToString(map));
  } catch (IOException expected) {
    // expected: no namenode address configured
  }
  try {
    Map<String, Map<String, InetSocketAddress>> map =
        DFSUtil.getBackupNodeAddresses(conf);
    fail("Expected IOException is not thrown, result was: "
        + DFSUtil.addressMapToString(map));
  } catch (IOException expected) {
    // expected: no backup node address configured
  }
  try {
    Map<String, Map<String, InetSocketAddress>> map =
        DFSUtil.getSecondaryNameNodeAddresses(conf);
    fail("Expected IOException is not thrown, result was: "
        + DFSUtil.addressMapToString(map));
  } catch (IOException expected) {
    // expected: no secondary namenode address configured
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests to ensure the default namenode (fs.defaultFS) is used as fallback
 * when no per-nameservice RPC address is configured.
 */
@Test public void testDefaultNamenode() throws IOException {
  HdfsConfiguration conf = new HdfsConfiguration();
  final String hdfs_default = "hdfs://localhost:9999/";
  conf.set(FS_DEFAULT_NAME_KEY, hdfs_default);
  // Exactly one (default) nameservice keyed by null is expected.
  Map<String, Map<String, InetSocketAddress>> addrMap =
      DFSUtil.getNNServiceRpcAddresses(conf);
  assertEquals(1, addrMap.size());
  Map<String, InetSocketAddress> defaultNsMap = addrMap.get(null);
  assertEquals(1, defaultNsMap.size());
  // The port must come from the default filesystem URI set above.
  assertEquals(9999, defaultNsMap.get(null).getPort());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test {@link DFSUtil#getNameServiceIds(Configuration)}.
 */
@Test public void testGetNameServiceIds(){
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(DFS_NAMESERVICES, "nn1,nn2");
  Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
  Iterator<String> it = nameserviceIds.iterator();
  assertEquals(2, nameserviceIds.size());
  // The configured order of the ids must be preserved.
  assertEquals("nn1", it.next());
  assertEquals("nn2", it.next());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies HA detection, RPC-address maps and service-address resolution
 * when HA and federation are combined (two nameservices, two NNs each).
 */
@Test public void testHANameNodesWithFederation() throws URISyntaxException {
  HdfsConfiguration conf = new HdfsConfiguration();
  final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
  final String NS1_NN2_HOST = "ns1-nn2.example.com:8020";
  final String NS2_NN1_HOST = "ns2-nn1.example.com:8020";
  final String NS2_NN2_HOST = "ns2-nn2.example.com:8020";
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
  conf.set(DFS_NAMESERVICES, "ns1,ns2");
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"), "ns1-nn1,ns1-nn2");
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns2"), "ns2-nn1,ns2-nn2");
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "ns1-nn1"), NS1_NN1_HOST);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "ns1-nn2"), NS1_NN2_HOST);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn1"), NS2_NN1_HOST);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn2"), NS2_NN2_HOST);
  Map<String, Map<String, InetSocketAddress>> map = DFSUtil.getHaNnRpcAddresses(conf);
  assertTrue(HAUtil.isHAEnabled(conf, "ns1"));
  assertTrue(HAUtil.isHAEnabled(conf, "ns2"));
  // An unconfigured nameservice is not HA.
  assertFalse(HAUtil.isHAEnabled(conf, "ns3"));
  assertEquals(NS1_NN1_HOST, map.get("ns1").get("ns1-nn1").toString());
  assertEquals(NS1_NN2_HOST, map.get("ns1").get("ns1-nn2").toString());
  assertEquals(NS2_NN1_HOST, map.get("ns2").get("ns2-nn1").toString());
  assertEquals(NS2_NN2_HOST, map.get("ns2").get("ns2-nn2").toString());
  assertEquals(NS1_NN1_HOST, DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn1"));
  assertEquals(NS1_NN2_HOST, DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn2"));
  assertEquals(NS2_NN1_HOST, DFSUtil.getNamenodeServiceAddr(conf, "ns2", "ns2-nn1"));
  // Lookups without a nameservice cannot be resolved in a federation.
  assertNull(DFSUtil.getNamenodeServiceAddr(conf, null, "ns1-nn1"));
  assertNull(DFSUtil.getNamenodeNameServiceId(conf));
  assertNull(DFSUtil.getSecondaryNameServiceId(conf));
  // Both HA nameservices surface as logical URIs.
  Collection<URI> uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
  assertEquals(2, uris.size());
  assertTrue(uris.contains(new URI("hdfs://ns1")));
  assertTrue(uris.contains(new URI("hdfs://ns2")));
}
APIUtilityVerifier InternalCallVerifier AssumptionSetter EqualityVerifier ConditionMatcher HybridVerifier
/**
 * Verifies that a loopback default-FS address is not surfaced verbatim:
 * the returned nameservice URI's host must not be "127.0.0.1".
 */
@Test(timeout=15000) public void testLocalhostReverseLookup(){
  // Not run on Windows.
  Assume.assumeTrue(!Shell.WINDOWS);
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
  Collection<URI> uris = DFSUtil.getNameServiceUris(conf);
  assertEquals(1, uris.size());
  for (URI uri : uris) {
    assertThat(uri.getHost(), not("127.0.0.1"));
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test conversion of LocatedBlock to BlockLocation.
 */
@Test public void testLocatedBlocks2Locations(){
  DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] ds = new DatanodeInfo[]{d};
  // One healthy block (l1) and one marked corrupt (l2).
  ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
  LocatedBlock l1 = new LocatedBlock(b1, ds, 0, false);
  ExtendedBlock b2 = new ExtendedBlock("bpid", 2, 1, 1);
  LocatedBlock l2 = new LocatedBlock(b2, ds, 0, true);
  List<LocatedBlock> ls = Arrays.asList(l1, l2);
  LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null);
  BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs);
  assertEquals("expected 2 blocks but got " + bs.length, 2, bs.length);
  // Exactly the corrupt LocatedBlock must map to a corrupt BlockLocation.
  int corruptCount = 0;
  for (BlockLocation b : bs) {
    if (b.isCorrupt()) {
      corruptCount++;
    }
  }
  assertEquals("expected 1 corrupt files but got " + corruptCount, 1, corruptCount);
  // An empty LocatedBlocks must map to an empty array, not null.
  bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
  assertEquals(0, bs.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)} and
 * {@link DFSUtil#getNameServiceIdFromAddress} with two federated
 * (non-HA) namenodes.
 */
@Test public void testMultipleNamenodes() throws IOException {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(DFS_NAMESERVICES, "nn1,nn2");
  final String NN1_ADDRESS = "localhost:9000";
  final String NN2_ADDRESS = "localhost:9001";
  final String NN3_ADDRESS = "localhost:9002";
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"), NN1_ADDRESS);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"), NN2_ADDRESS);
  Map<String, Map<String, InetSocketAddress>> nnMap =
      DFSUtil.getNNServiceRpcAddresses(conf);
  assertEquals(2, nnMap.size());
  // Non-HA nameservices have a single address keyed by null.
  Map<String, InetSocketAddress> nn1Map = nnMap.get("nn1");
  assertEquals(1, nn1Map.size());
  InetSocketAddress addr = nn1Map.get(null);
  assertEquals("localhost", addr.getHostName());
  assertEquals(9000, addr.getPort());
  Map<String, InetSocketAddress> nn2Map = nnMap.get("nn2");
  assertEquals(1, nn2Map.size());
  addr = nn2Map.get(null);
  assertEquals("localhost", addr.getHostName());
  assertEquals(9001, addr.getPort());
  // Reverse lookup: address -> nameservice id; an unknown address maps
  // to null.
  checkNameServiceId(conf, NN1_ADDRESS, "nn1");
  checkNameServiceId(conf, NN2_ADDRESS, "nn2");
  checkNameServiceId(conf, NN3_ADDRESS, null);
  assertFalse(HAUtil.isHAEnabled(conf, "nn1"));
  assertFalse(HAUtil.isHAEnabled(conf, "nn2"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that the client respects its keepalive timeout.
 */
@Test(timeout=30000) public void testClientResponsesKeepAliveTimeout() throws Exception {
  Configuration clientConf = new Configuration(conf);
  // Expire cached peers almost immediately on the client side.
  final long CLIENT_EXPIRY_MS = 10L;
  clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, CLIENT_EXPIRY_MS);
  clientConf.set(DFS_CLIENT_CONTEXT, "testClientResponsesKeepAliveTimeout");
  DistributedFileSystem fs =
      (DistributedFileSystem) FileSystem.get(cluster.getURI(), clientConf);
  PeerCache peerCache = ClientContext.getFromConf(clientConf).getPeerCache();
  DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short) 1, 0L);
  // Nothing cached before the first read.
  assertEquals(0, peerCache.size());
  assertXceiverCount(0);
  DFSTestUtil.readFile(fs, TEST_FILE);
  // The read leaves one cached peer and one server-side xceiver.
  assertEquals(1, peerCache.size());
  assertXceiverCount(1);
  // After the expiry window the cache must evict the stale peer.
  Thread.sleep(CLIENT_EXPIRY_MS + 1);
  Peer peer = peerCache.get(dn.getDatanodeId(), false);
  assertNull(peer);
  assertEquals(0, peerCache.size());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Regression test for HDFS-3357. Check that the datanode is respecting
 * its configured keepalive timeout.
 */
@Test(timeout=30000) public void testDatanodeRespectsKeepAliveTimeout() throws Exception {
Configuration clientConf=new Configuration(conf);
// Client-side expiry is set long so the datanode's (shorter) keepalive
// is what closes the connection first.
final long CLIENT_EXPIRY_MS=60000L;
clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,CLIENT_EXPIRY_MS);
clientConf.set(DFS_CLIENT_CONTEXT,"testDatanodeRespectsKeepAliveTimeout");
DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(),clientConf);
PeerCache peerCache=ClientContext.getFromConf(clientConf).getPeerCache();
DFSTestUtil.createFile(fs,TEST_FILE,1L,(short)1,0L);
// No cached peers and no server-side xceivers before the first read.
assertEquals(0,peerCache.size());
assertXceiverCount(0);
DFSTestUtil.readFile(fs,TEST_FILE);
// The read caches one peer and leaves one xceiver alive on the datanode.
assertEquals(1,peerCache.size());
assertXceiverCount(1);
// Wait past the datanode's keepalive default so it closes its end.
Thread.sleep(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT + 1);
assertXceiverCount(0);
// The client still holds the now half-closed peer in its cache...
assertEquals(1,peerCache.size());
Peer peer=peerCache.get(dn.getDatanodeId(),false);
assertNotNull(peer);
// ...and reading from it yields EOF because the datanode closed the socket.
assertEquals(-1,peer.getInputStream().read());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Opens several streams so their peers land in the peer cache, waits for
 * the datanode-side xceivers to go away, and verifies the client can
 * still read even though every cached socket is closed on the remote end.
 */
@Test(timeout=30000) public void testManyClosedSocketsInCache() throws Exception {
  Configuration clientConf = new Configuration(conf);
  clientConf.set(DFS_CLIENT_CONTEXT, "testManyClosedSocketsInCache");
  DistributedFileSystem dfs =
      (DistributedFileSystem) FileSystem.get(cluster.getURI(), clientConf);
  PeerCache cache = ClientContext.getFromConf(clientConf).getPeerCache();
  DFSTestUtil.createFile(dfs, TEST_FILE, 1L, (short) 1, 0L);
  InputStream[] streams = new InputStream[5];
  try {
    for (int idx = 0; idx < streams.length; idx++) {
      streams[idx] = dfs.open(TEST_FILE);
    }
    // Drain every stream completely so its peer is returned to the cache.
    for (InputStream in : streams) {
      IOUtils.copyBytes(in, new NullOutputStream(), 1024);
    }
  } finally {
    IOUtils.cleanup(null, streams);
  }
  assertEquals(5, cache.size());
  // Give the datanode time to close its side of each connection.
  Thread.sleep(1500);
  assertXceiverCount(0);
  assertEquals(5, cache.size());
  // Reading must still succeed despite the stale cached peers.
  DFSTestUtil.readFile(dfs, TEST_FILE);
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies each block is scanned exactly once: adding new files must not
 * re-trigger scans of older blocks, and a datanode restart must not reset
 * the recorded scan time.
 */
@Test public void testDuplicateScans() throws Exception {
  long startTime = Time.monotonicNow();
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
  FileSystem fs = null;
  try {
    fs = cluster.getFileSystem();
    DataNode dataNode = cluster.getDataNodes().get(0);
    int infoPort = dataNode.getInfoPort();
    long scanTimeBefore = 0;
    long scanTimeAfter = 0;
    for (int i = 1; i < 10; i++) {
      Path fileName = new Path("/test" + i);
      DFSTestUtil.createFile(fs, fileName, 1024, (short) 1, 1000L);
      waitForVerification(infoPort, fs, fileName, i, startTime, TIMEOUT);
      if (i > 1) {
        // The previous file's block must not have been re-scanned since
        // its scan time was recorded in the last iteration.
        scanTimeAfter = DataNodeTestUtils.getLatestScanTime(dataNode,
            DFSTestUtil.getFirstBlock(fs, new Path("/test" + (i - 1))));
        assertFalse("scan time should not be 0", scanTimeAfter == 0);
        assertEquals("There should not be duplicate scan",
            scanTimeBefore, scanTimeAfter);
      }
      scanTimeBefore = DataNodeTestUtils.getLatestScanTime(dataNode,
          DFSTestUtil.getFirstBlock(fs, new Path("/test" + i)));
    }
    // A restart must not trigger a fresh scan of already-verified blocks.
    cluster.restartDataNode(0);
    Thread.sleep(10000);
    dataNode = cluster.getDataNodes().get(0);
    scanTimeAfter = DataNodeTestUtils.getLatestScanTime(dataNode,
        DFSTestUtil.getFirstBlock(fs, new Path("/test" + (9))));
    assertEquals("There should not be duplicate scan",
        scanTimeBefore, scanTimeAfter);
  } finally {
    IOUtils.closeStream(fs);
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testBlockCorruptionPolicy() throws Exception {
Configuration conf=new HdfsConfiguration();
// Frequent block reports so corruption is noticed quickly.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000L);
Random random=new Random();
FileSystem fs=null;
// Pick one of the three datanodes at random for the single-replica case.
int rand=random.nextInt(3);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
fs=cluster.getFileSystem();
Path file1=new Path("/tmp/testBlockVerification/file1");
DFSTestUtil.createFile(fs,file1,1024,(short)3,0);
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,file1);
DFSTestUtil.waitReplication(fs,file1,(short)3);
assertFalse(DFSTestUtil.allBlockReplicasCorrupt(cluster,file1,0));
// Corrupt a single replica, then restart that datanode; after its block
// report the healthy replica count should settle at 2 and not all
// replicas are corrupt.
assertTrue(MiniDFSCluster.corruptReplica(rand,block));
cluster.restartDataNode(rand);
DFSTestUtil.waitReplication(fs,file1,(short)2);
assertFalse(DFSTestUtil.allBlockReplicasCorrupt(cluster,file1,0));
// Now corrupt every replica and force a block scan on each datanode;
// all replicas must end up flagged as corrupt.
assertTrue(MiniDFSCluster.corruptReplica(0,block));
assertTrue(MiniDFSCluster.corruptReplica(1,block));
assertTrue(MiniDFSCluster.corruptReplica(2,block));
for ( DataNode dn : cluster.getDataNodes()) {
DataNodeTestUtils.runBlockScannerForBlock(dn,block);
}
DFSTestUtil.waitReplication(fs,file1,(short)3);
assertTrue(DFSTestUtil.allBlockReplicasCorrupt(cluster,file1,0));
cluster.shutdown();
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Test that a data-node does not start if configuration specifies
 * incorrect URI scheme in data directory.
 * Test that a data-node starts if data directory is specified as
 * URI = "file:///path" or as a non URI path.
 */
@Test public void testDataDirectories() throws IOException {
File dataDir=new File(BASE_DIR,"data").getCanonicalFile();
Configuration conf=cluster.getConfiguration(0);
// Case 1: bogus "shv" URI scheme — DataNode creation must fail.
String dnDir=makeURI("shv",null,fileAsURI(dataDir).getPath());
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,dnDir);
DataNode dn=null;
try {
dn=DataNode.createDataNode(new String[]{},conf);
fail();
}
catch ( Exception e) {
// Expected: the invalid scheme prevents DataNode creation, so 'dn'
// stays null (asserted below).
}
finally {
if (dn != null) {
dn.shutdown();
}
}
assertNull("Data-node startup should have failed.",dn);
// Case 2: three valid variants — a plain file URI, a file URI with a
// "localhost" authority, and a bare filesystem path — all accepted.
String dnDir1=fileAsURI(dataDir).toString() + "1";
String dnDir2=makeURI("file","localhost",fileAsURI(dataDir).getPath() + "2");
String dnDir3=dataDir.getAbsolutePath() + "3";
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,dnDir1 + "," + dnDir2+ ","+ dnDir3);
try {
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
assertTrue("Data-node should startup.",cluster.isDataNodeUp());
}
finally {
if (cluster != null) {
cluster.shutdownDataNodes();
}
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
* Tests decommission with replicas on the target datanode cannot be migrated
* to other datanodes and satisfy the replication factor. Make sure the
* datanode won't get stuck in decommissioning state.
*/
@Test(timeout=360000) public void testDecommission2() throws IOException {
LOG.info("Starting test testDecommission");
int numNamenodes=1;
int numDatanodes=4;
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
startCluster(numNamenodes,numDatanodes,conf);
ArrayList> namenodeDecomList=new ArrayList>(numNamenodes);
namenodeDecomList.add(0,new ArrayList(numDatanodes));
Path file1=new Path("testDecommission2.dat");
int replicas=4;
ArrayList decommissionedNodes=namenodeDecomList.get(0);
FileSystem fileSys=cluster.getFileSystem(0);
FSNamesystem ns=cluster.getNamesystem(0);
writeFile(fileSys,file1,replicas);
int deadDecomissioned=ns.getNumDecomDeadDataNodes();
int liveDecomissioned=ns.getNumDecomLiveDataNodes();
DatanodeInfo decomNode=decommissionNode(0,null,decommissionedNodes,AdminStates.DECOMMISSIONED);
decommissionedNodes.add(decomNode);
assertEquals(deadDecomissioned,ns.getNumDecomDeadDataNodes());
assertEquals(liveDecomissioned + 1,ns.getNumDecomLiveDataNodes());
DFSClient client=getDfsClient(cluster.getNameNode(0),conf);
assertEquals("All datanodes must be alive",numDatanodes,client.datanodeReport(DatanodeReportType.LIVE).length);
assertNull(checkFile(fileSys,file1,replicas,decomNode.getXferAddr(),numDatanodes));
cleanupFile(fileSys,file1);
cluster.shutdown();
startCluster(1,4,conf);
cluster.shutdown();
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test using a "registration name" in a host include file.
 * Registration names are DataNode names specified in the configuration by
 * dfs.datanode.hostname. The DataNode will send this name to the NameNode
 * as part of its registration. Registration names are helpful when you
 * want to override the normal first result of DNS resolution on the
 * NameNode. For example, a given datanode IP may map to two hostnames,
 * and you may want to choose which hostname is used internally in the
 * cluster.
 * It is not recommended to use a registration name which is not also a
 * valid DNS hostname for the DataNode. See HDFS-5237 for background.
 */
@Test(timeout=360000) public void testIncludeByRegistrationName() throws IOException, InterruptedException {
  Configuration hdfsConf = new Configuration(conf);
  final String registrationName = "127.0.0.100";
  final String nonExistentDn = "127.0.0.10";
  hdfsConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, registrationName);
  cluster = new MiniDFSCluster.Builder(hdfsConf).numDataNodes(1)
      .checkDataNodeHostConfig(true).setupHostsFile(true).build();
  cluster.waitActive();
  // Include only a host that does not exist, so the real datanode is
  // not in the include list and is eventually marked dead.
  ArrayList<String> nodes = new ArrayList<String>();
  nodes.add(nonExistentDn);
  writeConfigFile(hostsFile, nodes);
  refreshNodes(cluster.getNamesystem(0), hdfsConf);
  DFSClient client = getDfsClient(cluster.getNameNode(0), hdfsConf);
  while (true) {
    DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.DEAD);
    if (info.length == 1) {
      break;
    }
    LOG.info("Waiting for datanode to be marked dead");
    Thread.sleep(HEARTBEAT_INTERVAL * 1000);
  }
  // Now include the datanode by its registration name; after a restart
  // it must come back live under that name, not decommissioned.
  int dnPort = cluster.getDataNodes().get(0).getXferPort();
  nodes = new ArrayList<String>();
  nodes.add(registrationName + ":" + dnPort);
  writeConfigFile(hostsFile, nodes);
  refreshNodes(cluster.getNamesystem(0), hdfsConf);
  cluster.restartDataNode(0);
  while (true) {
    DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.LIVE);
    if (info.length == 1) {
      Assert.assertFalse(info[0].isDecommissioned());
      Assert.assertFalse(info[0].isDecommissionInProgress());
      assertEquals(registrationName, info[0].getHostName());
      break;
    }
    LOG.info("Waiting for datanode to come back");
    Thread.sleep(HEARTBEAT_INTERVAL * 1000);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests restart of namenode while datanode hosts are added to exclude file:
 * the excluded datanode must reach DECOMMISSIONED after the restart and
 * its block must be re-replicated to the remaining node.
 */
@Test(timeout=360000) public void testDecommissionWithNamenodeRestart() throws IOException, InterruptedException {
  LOG.info("Starting test testDecommissionWithNamenodeRestart");
  int numNamenodes = 1;
  int numDatanodes = 1;
  int replicas = 1;
  startCluster(numNamenodes, numDatanodes, conf);
  Path file1 = new Path("testDecommission.dat");
  FileSystem fileSys = cluster.getFileSystem();
  writeFile(fileSys, file1, replicas);
  DFSClient client = getDfsClient(cluster.getNameNode(), conf);
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  DatanodeID excludedDatanodeID = info[0];
  String excludedDatanodeName = info[0].getXferAddr();
  writeConfigFile(excludeFile,
      new ArrayList<String>(Arrays.asList(excludedDatanodeName)));
  // Add a second datanode so the block has somewhere to be re-replicated.
  cluster.startDataNodes(conf, 1, true, null, null, null, null);
  numDatanodes += 1;
  assertEquals("Number of datanodes should be 2 ", 2,
      cluster.getDataNodes().size());
  // Restart the NameNode; it should re-read the exclude file and
  // decommission the excluded node.
  cluster.restartNameNode();
  DatanodeInfo datanodeInfo =
      NameNodeAdapter.getDatanode(cluster.getNamesystem(), excludedDatanodeID);
  waitNodeState(datanodeInfo, AdminStates.DECOMMISSIONED);
  assertEquals("All datanodes must be alive", numDatanodes,
      client.datanodeReport(DatanodeReportType.LIVE).length);
  // Poll (up to ~20s) until the block is no longer reported only on the
  // decommissioned node; checkFile returns null on success.
  int tries = 0;
  while (tries++ < 20) {
    try {
      Thread.sleep(1000);
      if (checkFile(fileSys, file1, replicas, datanodeInfo.getXferAddr(),
          numDatanodes) == null) {
        break;
      }
    } catch (InterruptedException ie) {
      // Best-effort polling loop: an interrupt only shortens this sleep.
      // NOTE(review): consider restoring the interrupt flag here.
    }
  }
  assertTrue("Checked if block was replicated after decommission, tried "
      + tries + " times.", tries < 20);
  cleanupFile(fileSys, file1);
  cluster.shutdown();
  // Make sure a fresh cluster can still be started afterwards.
  startCluster(numNamenodes, numDatanodes, conf);
  cluster.shutdown();
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests the normal path of batching up BlockLocation[]s to be passed to a
 * single
 * {@link DistributedFileSystem#getFileBlockStorageLocations(java.util.List)}
 * call.
 */
@Test(timeout=60000) public void testGetFileBlockStorageLocationsBatching() throws Exception {
  final Configuration conf = getTestConfiguration();
  // Trace-level logging to diagnose flaky replication/location issues.
  ((Log4JLogger) ProtobufRpcEngine.LOG).getLogger().setLevel(Level.TRACE);
  ((Log4JLogger) BlockStorageLocationUtil.LOG).getLogger().setLevel(Level.TRACE);
  ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.TRACE);
  conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    final DistributedFileSystem fs = cluster.getFileSystem();
    final Path tmpFile1 = new Path("/tmpfile1.dat");
    final Path tmpFile2 = new Path("/tmpfile2.dat");
    DFSTestUtil.createFile(fs, tmpFile1, 1024, (short) 2, 0xDEADDEADL);
    DFSTestUtil.createFile(fs, tmpFile2, 1024, (short) 2, 0xDEADDEADL);
    // Wait until both files are fully replicated: 2 blocks x 2 replicas
    // = 4 reported hosts in total.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override public Boolean get() {
        try {
          List<BlockLocation> list = Lists.newArrayList();
          list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile1, 0, 1024)));
          list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile2, 0, 1024)));
          int totalRepl = 0;
          for (BlockLocation loc : list) {
            totalRepl += loc.getHosts().length;
          }
          if (totalRepl == 4) {
            return true;
          }
        } catch (IOException e) {
          // Ignore and retry; waitFor polls until its timeout expires.
        }
        return false;
      }
    }, 500, 30000);
    // Batch the locations of both files into a single storage-location call.
    BlockLocation[] blockLocs1 = fs.getFileBlockLocations(tmpFile1, 0, 1024);
    BlockLocation[] blockLocs2 = fs.getFileBlockLocations(tmpFile2, 0, 1024);
    BlockLocation[] blockLocs = (BlockLocation[]) ArrayUtils.addAll(blockLocs1, blockLocs2);
    BlockStorageLocation[] locs = fs.getFileBlockStorageLocations(Arrays.asList(blockLocs));
    // Diagnostic dump of every (datanode, volume) pair that was reported.
    int counter = 0;
    for (BlockStorageLocation l : locs) {
      for (int i = 0; i < l.getVolumeIds().length; i++) {
        VolumeId id = l.getVolumeIds()[i];
        String name = l.getNames()[i];
        if (id != null) {
          System.out.println("Datanode " + name + " has block " + counter
              + " on volume id " + id.toString());
        }
      }
      counter++;
    }
    assertEquals("Expected two HdfsBlockLocations for two 1-block files",
        2, locs.length);
    for (BlockStorageLocation l : locs) {
      assertEquals("Expected two replicas for each block",
          2, l.getVolumeIds().length);
      for (int i = 0; i < l.getVolumeIds().length; i++) {
        VolumeId id = l.getVolumeIds()[i];
        String name = l.getNames()[i];
        assertNotNull("Expected block to be valid on datanode " + name, id);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testLongLivedClient() throws IOException, InterruptedException {
MiniDFSCluster cluster=null;
try {
Configuration conf=new Configuration();
cluster=new MiniDFSCluster.Builder(conf).build();
// Baseline: write the test data and record its checksum with
// encryption disabled.
FileSystem fs=getFileSystem(conf);
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
FileChecksum checksum=fs.getFileChecksum(TEST_PATH);
fs.close();
cluster.shutdown();
// Restart the same (unformatted) storage with encryption enabled.
setEncryptionConfigKeys(conf);
cluster=new MiniDFSCluster.Builder(conf).manageDataDfsDirs(false).manageNameDfsDirs(false).format(false).startupOption(StartupOption.REGULAR).build();
// Shrink the key update interval and token lifetime so the encryption
// keys expire while this client is still alive.
BlockTokenSecretManager btsm=cluster.getNamesystem().getBlockManager().getBlockTokenSecretManager();
btsm.setKeyUpdateIntervalForTesting(2 * 1000);
btsm.setTokenLifetime(2 * 1000);
btsm.clearAllKeysForTesting();
fs=getFileSystem(conf);
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
assertEquals(checksum,fs.getFileChecksum(TEST_PATH));
LOG.info("Sleeping so that encryption keys expire...");
Thread.sleep(15 * 1000);
LOG.info("Done sleeping.");
// Reads must still succeed with the same client after key expiry.
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
assertEquals(checksum,fs.getFileChecksum(TEST_PATH));
fs.close();
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies a long-lived read client survives a NameNode and DataNode
 * restart on an encrypted cluster: the same FileSystem handle opened
 * before the restarts must still read the data and checksum afterwards.
 */
@Test public void testLongLivedReadClientAfterRestart() throws IOException {
  MiniDFSCluster miniCluster = null;
  try {
    Configuration config = new Configuration();
    miniCluster = new MiniDFSCluster.Builder(config).build();
    FileSystem fileSys = getFileSystem(config);
    writeTestDataToFile(fileSys);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    FileChecksum origChecksum = fileSys.getFileChecksum(TEST_PATH);
    fileSys.close();
    miniCluster.shutdown();
    // Bring the cluster back with encryption enabled over the same data.
    setEncryptionConfigKeys(config);
    miniCluster = new MiniDFSCluster.Builder(config)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .format(false)
        .startupOption(StartupOption.REGULAR)
        .build();
    fileSys = getFileSystem(config);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    assertEquals(origChecksum, fileSys.getFileChecksum(TEST_PATH));
    // Restart both daemons while the client handle stays open.
    miniCluster.restartNameNode();
    assertTrue(miniCluster.restartDataNode(0));
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    assertEquals(origChecksum, fileSys.getFileChecksum(TEST_PATH));
    fileSys.close();
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that data written without encryption can be read back over an
 * encrypted channel after the cluster is restarted with encryption
 * enabled, and that the file checksum is unchanged.
 */
@Test public void testEncryptedRead() throws IOException {
  MiniDFSCluster miniCluster = null;
  try {
    Configuration config = new Configuration();
    miniCluster = new MiniDFSCluster.Builder(config).build();
    FileSystem fileSys = getFileSystem(config);
    writeTestDataToFile(fileSys);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    FileChecksum origChecksum = fileSys.getFileChecksum(TEST_PATH);
    fileSys.close();
    miniCluster.shutdown();
    // Restart over the same storage with wire encryption on.
    setEncryptionConfigKeys(config);
    miniCluster = new MiniDFSCluster.Builder(config)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .format(false)
        .startupOption(StartupOption.REGULAR)
        .build();
    fileSys = getFileSystem(config);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    assertEquals(origChecksum, fileSys.getFileChecksum(TEST_PATH));
    fileSys.close();
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests an append that forces a pipeline block transfer over encrypted
 * channels: a single 3-replica block is written on a 4-datanode cluster,
 * the first replica's datanode is shut down, and a second append must
 * recruit the spare datanode via block transfer. The final file content
 * is the test data twice.
 */
@Test public void testEncryptedAppendRequiringBlockTransfer() throws IOException {
MiniDFSCluster cluster=null;
try {
Configuration conf=new Configuration();
// Encryption is on from the start for this test.
setEncryptionConfigKeys(conf);
// 4 datanodes so one spare remains after we kill a pipeline member.
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
FileSystem fs=getFileSystem(conf);
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
FSDataInputStream in=fs.open(TEST_PATH);
List locatedBlocks=DFSTestUtil.getAllBlocks(in);
in.close();
// Sanity: exactly one block, replicated to 3 of the 4 datanodes.
assertEquals(1,locatedBlocks.size());
assertEquals(3,locatedBlocks.get(0).getLocations().length);
// Kill the datanode holding the first replica to force the append
// pipeline to transfer the block to the remaining spare datanode.
DataNode dn=cluster.getDataNode(locatedBlocks.get(0).getLocations()[0].getIpcPort());
dn.shutdown();
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT + PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
fs.close();
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies encrypted reads keep working across a NameNode restart: write
 * plaintext unencrypted, restart with encryption on, read, restart the
 * NameNode, open a fresh client, and read again. Checksums must match
 * throughout.
 */
@Test public void testEncryptedReadAfterNameNodeRestart() throws IOException {
  MiniDFSCluster miniCluster = null;
  try {
    Configuration config = new Configuration();
    miniCluster = new MiniDFSCluster.Builder(config).build();
    FileSystem fileSys = getFileSystem(config);
    writeTestDataToFile(fileSys);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    FileChecksum origChecksum = fileSys.getFileChecksum(TEST_PATH);
    fileSys.close();
    miniCluster.shutdown();
    // Restart over the same storage with wire encryption on.
    setEncryptionConfigKeys(config);
    miniCluster = new MiniDFSCluster.Builder(config)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .format(false)
        .startupOption(StartupOption.REGULAR)
        .build();
    fileSys = getFileSystem(config);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    assertEquals(origChecksum, fileSys.getFileChecksum(TEST_PATH));
    fileSys.close();
    // Bounce the NameNode and re-verify with a brand-new client.
    miniCluster.restartNameNode();
    fileSys = getFileSystem(config);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    assertEquals(origChecksum, fileSys.getFileChecksum(TEST_PATH));
    fileSys.close();
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies a long-lived write client survives a full cluster restart on
 * an encrypted cluster: after restarting the NameNode and all DataNodes,
 * the same FileSystem handle must still be able to append data.
 */
@Test public void testLongLivedWriteClientAfterRestart() throws IOException {
  MiniDFSCluster miniCluster = null;
  try {
    Configuration config = new Configuration();
    setEncryptionConfigKeys(config);
    miniCluster = new MiniDFSCluster.Builder(config).build();
    FileSystem fileSys = getFileSystem(config);
    writeTestDataToFile(fileSys);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    // Restart every daemon while the client handle stays open.
    miniCluster.restartNameNode();
    assertTrue(miniCluster.restartDataNodes());
    miniCluster.waitActive();
    // Writing through the pre-restart handle must still work.
    writeTestDataToFile(fileSys);
    assertEquals(PLAIN_TEXT + PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    fileSys.close();
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Same as the plain encrypted-read test, but with the data encryption
 * algorithm explicitly set to RC4 instead of the default.
 */
@Test public void testEncryptedReadWithRC4() throws IOException {
  MiniDFSCluster miniCluster = null;
  try {
    Configuration config = new Configuration();
    miniCluster = new MiniDFSCluster.Builder(config).build();
    FileSystem fileSys = getFileSystem(config);
    writeTestDataToFile(fileSys);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    FileChecksum origChecksum = fileSys.getFileChecksum(TEST_PATH);
    fileSys.close();
    miniCluster.shutdown();
    // Restart with encryption on, forcing the RC4 cipher.
    setEncryptionConfigKeys(config);
    config.set(DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY, "rc4");
    miniCluster = new MiniDFSCluster.Builder(config)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .format(false)
        .startupOption(StartupOption.REGULAR)
        .build();
    fileSys = getFileSystem(config);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    assertEquals(origChecksum, fileSys.getFileChecksum(TEST_PATH));
    fileSys.close();
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Tests that a client which refuses to encrypt (shouldEncryptData() is
 * stubbed to return false) cannot read from an encryption-enabled
 * cluster, and that the DataNode logs a failed encryption handshake.
 * When a trusted-channel resolver is configured (resolverClazz ends with
 * "TestTrustedChannelResolver"), the unencrypted read is allowed and the
 * failure assertions are skipped.
 */
@Test public void testClientThatDoesNotSupportEncryption() throws IOException {
MiniDFSCluster cluster=null;
try {
Configuration conf=new Configuration();
// Small retry window so the expected read failure happens quickly.
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10);
cluster=new MiniDFSCluster.Builder(conf).build();
FileSystem fs=getFileSystem(conf);
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
fs.close();
cluster.shutdown();
// Restart the same storage with encryption required.
setEncryptionConfigKeys(conf);
cluster=new MiniDFSCluster.Builder(conf).manageDataDfsDirs(false).manageNameDfsDirs(false).format(false).startupOption(StartupOption.REGULAR).build();
fs=getFileSystem(conf);
// Swap in a spy client that claims it does not support encryption.
DFSClient client=DFSClientAdapter.getDFSClient((DistributedFileSystem)fs);
DFSClient spyClient=Mockito.spy(client);
Mockito.doReturn(false).when(spyClient).shouldEncryptData();
DFSClientAdapter.setDFSClient((DistributedFileSystem)fs,spyClient);
// Capture DataNode logs to later assert on the handshake failure.
LogCapturer logs=GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(DataNode.class));
try {
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
// Only a trusted-channel resolver makes the unencrypted read legal.
if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")) {
fail("Should not have been able to read without encryption enabled.");
}
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Could not obtain block:",ioe);
}
finally {
logs.stopCapturing();
}
fs.close();
if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")) {
GenericTestUtils.assertMatches(logs.getOutput(),"Failed to read expected encryption handshake from client at");
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier EqualityVerifier
/**
 * Verifies a simple append over encrypted channels: write once, append
 * once, and confirm the file contains the test data twice.
 */
@Test public void testEncryptedAppend() throws IOException {
  MiniDFSCluster miniCluster = null;
  try {
    Configuration config = new Configuration();
    setEncryptionConfigKeys(config);
    miniCluster = new MiniDFSCluster.Builder(config).numDataNodes(3).build();
    FileSystem fileSys = getFileSystem(config);
    writeTestDataToFile(fileSys);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    // Second write appends; content should now be doubled.
    writeTestDataToFile(fileSys);
    assertEquals(PLAIN_TEXT + PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    fileSys.close();
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test getEncryptionZoneForPath as a non super user.
 *
 * Covers: NPE on a null path, zone lookup for a readable zone and a file
 * inside it, AccessControlException for a superuser-only zone, null for
 * non-EZ paths, snapshot paths still resolving after the live zone is
 * deleted, and null once the live file/zone are gone.
 */
@Test(timeout=60000) public void testGetEZAsNonSuperUser() throws Exception {
final UserGroupInformation user=UserGroupInformation.createUserForTesting("user",new String[]{"mygroup"});
final Path testRoot=new Path(fsHelper.getTestRootDir());
final Path superPath=new Path(testRoot,"superuseronly");
final Path superPathFile=new Path(superPath,"file1");
final Path allPath=new Path(testRoot,"accessall");
final Path allPathFile=new Path(allPath,"file1");
final Path nonEZDir=new Path(testRoot,"nonEZDir");
final Path nonEZFile=new Path(nonEZDir,"file1");
final int len=8192;
// superPath is 0700 (superuser only); allPath and nonEZDir are 0777.
fsWrapper.mkdir(testRoot,new FsPermission((short)0777),true);
fsWrapper.mkdir(superPath,new FsPermission((short)0700),false);
fsWrapper.mkdir(allPath,new FsPermission((short)0777),false);
fsWrapper.mkdir(nonEZDir,new FsPermission((short)0777),false);
dfsAdmin.createEncryptionZone(superPath,TEST_KEY);
dfsAdmin.createEncryptionZone(allPath,TEST_KEY);
// Snapshot the root BEFORE creating files, so snapshot paths exist
// even after the live copies are deleted below.
dfsAdmin.allowSnapshot(new Path("/"));
final Path newSnap=fs.createSnapshot(new Path("/"));
DFSTestUtil.createFile(fs,superPathFile,len,(short)1,0xFEED);
DFSTestUtil.createFile(fs,allPathFile,len,(short)1,0xFEED);
DFSTestUtil.createFile(fs,nonEZFile,len,(short)1,0xFEED);
// All lookups below run as the unprivileged "user".
user.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final HdfsAdmin userAdmin=new HdfsAdmin(FileSystem.getDefaultUri(conf),conf);
try {
userAdmin.getEncryptionZoneForPath(null);
fail("should have thrown NPE");
}
catch ( NullPointerException e) {
}
// Readable zone: both the zone dir and a file in it resolve to it.
assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(allPath).getPath().toString());
assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(allPathFile).getPath().toString());
try {
userAdmin.getEncryptionZoneForPath(superPathFile);
fail("expected AccessControlException");
}
catch ( AccessControlException e) {
assertExceptionContains("Permission denied:",e);
}
assertNull("expected null for non-ez path",userAdmin.getEncryptionZoneForPath(nonEZDir));
assertNull("expected null for non-ez path",userAdmin.getEncryptionZoneForPath(nonEZFile));
// The snapshot path must keep resolving to the zone even as the
// live file and then the live zone are deleted.
String snapshottedAllPath=newSnap.toString() + allPath.toString();
assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath)).getPath().toString());
fs.delete(allPathFile,false);
assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath)).getPath().toString());
fs.delete(allPath,true);
assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath)).getPath().toString());
// Live paths are gone, so lookups now return null.
assertNull("expected null for deleted file path",userAdmin.getEncryptionZoneForPath(allPathFile));
assertNull("expected null for deleted directory path",userAdmin.getEncryptionZoneForPath(allPath));
return null;
}
}
);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests read/write inside an encryption zone across a key roll: files
 * created before and after rolling the zone key must both read back
 * identical to an unencrypted baseline, have different EDEKs, and carry
 * different EZ key version names.
 */
@Test(timeout=120000) public void testReadWrite() throws Exception {
final HdfsAdmin dfsAdmin=new HdfsAdmin(FileSystem.getDefaultUri(conf),conf);
// Unencrypted baseline file for byte-for-byte comparison.
final Path baseFile=new Path("/base");
final int len=8192;
DFSTestUtil.createFile(fs,baseFile,len,(short)1,0xFEED);
final Path zone=new Path("/zone");
fs.mkdirs(zone);
dfsAdmin.createEncryptionZone(zone,TEST_KEY);
final Path encFile1=new Path(zone,"myfile");
DFSTestUtil.createFile(fs,encFile1,len,(short)1,0xFEED);
verifyFilesEqual(fs,baseFile,encFile1,len);
assertNumZones(1);
// Roll the zone key; the pre-roll file must still decrypt correctly.
String keyName=dfsAdmin.listEncryptionZones().next().getKeyName();
cluster.getNamesystem().getProvider().rollNewVersion(keyName);
verifyFilesEqual(fs,baseFile,encFile1,len);
// A file created after the roll uses the new key version.
final Path encFile2=new Path(zone,"myfile2");
DFSTestUtil.createFile(fs,encFile2,len,(short)1,0xFEED);
FileEncryptionInfo feInfo1=getFileEncryptionInfo(encFile1);
FileEncryptionInfo feInfo2=getFileEncryptionInfo(encFile2);
assertFalse("EDEKs should be different",Arrays.equals(feInfo1.getEncryptedDataEncryptionKey(),feInfo2.getEncryptedDataEncryptionKey()));
assertNotEquals("Key was rolled, versions should be different",feInfo1.getEzKeyVersionName(),feInfo2.getEzKeyVersionName());
verifyFilesEqual(fs,encFile1,encFile2,len);
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Tests client/NameNode cipher suite negotiation when creating files in
 * an encryption zone: an empty suite list and an all-UNKNOWN list must
 * fail with UnknownCipherSuiteException, while any list containing
 * AES/CTR/NoPadding (regardless of position) must succeed. Also checks
 * the NameNode created exactly one key with one version for the zone.
 */
@Test(timeout=60000) public void testCipherSuiteNegotiation() throws Exception {
final HdfsAdmin dfsAdmin=new HdfsAdmin(FileSystem.getDefaultUri(conf),conf);
final Path zone=new Path("/zone");
fs.mkdirs(zone);
dfsAdmin.createEncryptionZone(zone,TEST_KEY);
// Default suites: creation succeeds.
DFSTestUtil.createFile(fs,new Path(zone,"success1"),0,(short)1,0xFEED);
// Empty suite list: creation must fail.
fs.getClient().cipherSuites=Lists.newArrayListWithCapacity(0);
try {
DFSTestUtil.createFile(fs,new Path(zone,"fail"),0,(short)1,0xFEED);
fail("Created a file without specifying a CipherSuite!");
}
catch ( UnknownCipherSuiteException e) {
assertExceptionContains("No cipher suites",e);
}
// All-UNKNOWN suite list: creation must also fail.
fs.getClient().cipherSuites=Lists.newArrayListWithCapacity(3);
fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
try {
DFSTestUtil.createFile(fs,new Path(zone,"fail"),0,(short)1,0xFEED);
fail("Created a file without specifying a CipherSuite!");
}
catch ( UnknownCipherSuiteException e) {
assertExceptionContains("No cipher suites",e);
}
// AES/CTR/NoPadding first among UNKNOWNs: succeeds.
fs.getClient().cipherSuites=Lists.newArrayListWithCapacity(3);
fs.getClient().cipherSuites.add(CipherSuite.AES_CTR_NOPADDING);
fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
DFSTestUtil.createFile(fs,new Path(zone,"success2"),0,(short)1,0xFEED);
// AES/CTR/NoPadding last: still succeeds (position does not matter).
fs.getClient().cipherSuites=Lists.newArrayListWithCapacity(3);
fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
fs.getClient().cipherSuites.add(CipherSuite.AES_CTR_NOPADDING);
DFSTestUtil.createFile(fs,new Path(zone,"success3"),4096,(short)1,0xFEED);
// Verify key material: one key for the zone, one version per key.
cluster.getNamesystem().getProvider().flush();
KeyProvider provider=KeyProviderFactory.getProviders(conf).get(0);
List keys=provider.getKeys();
assertEquals("Expected NN to have created one key per zone",1,keys.size());
List allVersions=Lists.newArrayList();
for ( String key : keys) {
List versions=provider.getKeyVersions(key);
assertEquals("Should only have one key version per key",1,versions.size());
allVersions.addAll(versions);
}
// The successfully-negotiated files must record AES/CTR/NoPadding.
for (int i=2; i <= 3; i++) {
FileEncryptionInfo feInfo=getFileEncryptionInfo(new Path(zone.toString() + "/success" + i));
assertEquals(feInfo.getCipherSuite(),CipherSuite.AES_CTR_NOPADDING);
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Test that copy on write for blocks works correctly:
 * hard-link a subset of a file's block files, verify that unlinking
 * (detaching) each block succeeds the first time and is a no-op the
 * second time.
 * @throws IOException an exception might be thrown
 */
@Test public void testCopyOnWrite() throws IOException {
Configuration conf=new HdfsConfiguration();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
FileSystem fs=cluster.getFileSystem();
InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(addr,conf);
try {
Path file1=new Path("/filestatus.dat");
FSDataOutputStream stm=AppendTestUtil.createFile(fs,file1,1);
writeFile(stm);
stm.close();
DataNode[] dn=cluster.listDataNodes();
assertTrue("There should be only one datanode but found " + dn.length,dn.length == 1);
LocatedBlocks locations=client.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
List blocks=locations.getLocatedBlocks();
// Create hard links for a FEW of the blocks (every other one, by
// design) so that unlink has real link counts to deal with.
for (int i=0; i < blocks.size(); i=i + 2) {
ExtendedBlock b=blocks.get(i).getBlock();
final File f=DataNodeTestUtils.getFile(dn[0],b.getBlockPoolId(),b.getLocalBlock().getBlockId());
File link=new File(f.toString() + ".link");
System.out.println("Creating hardlink for File " + f + " to "+ link);
HardLink.createHardLink(f,link);
}
// First detach pass: every block should report a successful unlink.
for (int i=0; i < blocks.size(); i++) {
ExtendedBlock b=blocks.get(i).getBlock();
System.out.println("testCopyOnWrite detaching block " + b);
assertTrue("Detaching block " + b + " should have returned true",DataNodeTestUtils.unlinkBlock(dn[0],b,1));
}
// Second detach pass: already-detached blocks must return false.
for (int i=0; i < blocks.size(); i++) {
ExtendedBlock b=blocks.get(i).getBlock();
System.out.println("testCopyOnWrite detaching block " + b);
assertTrue("Detaching block " + b + " should have returned false",!DataNodeTestUtils.unlinkBlock(dn[0],b,1));
}
}
finally {
client.close();
fs.close();
cluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * TC11: Racing rename — rename a file while an append to it is still
 * open (after hflush but before close), then verify the renamed file's
 * length and that every replica's on-datanode metadata matches each
 * block's reported size.
 * @throws IOException an exception might be thrown
 */
@Test public void testTC11() throws Exception {
final Path p=new Path("/TC11/foo");
System.out.println("p=" + p);
// Start with exactly one full block.
final int len1=(int)BLOCK_SIZE;
{
FSDataOutputStream out=fs.create(p,false,buffersize,REPLICATION,BLOCK_SIZE);
AppendTestUtil.write(out,0,len1);
out.close();
}
// Append half a block, hflush (visible but stream still open) ...
FSDataOutputStream out=fs.append(p);
final int len2=(int)BLOCK_SIZE / 2;
AppendTestUtil.write(out,len1,len2);
out.hflush();
// ... then rename while the appender is still open, and close after.
final Path pnew=new Path(p + ".new");
assertTrue(fs.rename(p,pnew));
out.close();
final long len=fs.getFileStatus(pnew).getLen();
final LocatedBlocks locatedblocks=fs.dfs.getNamenode().getBlockLocations(pnew.toString(),0L,len);
final int numblock=locatedblocks.locatedBlockCount();
for (int i=0; i < numblock; i++) {
final LocatedBlock lb=locatedblocks.get(i);
final ExtendedBlock blk=lb.getBlock();
final long size=lb.getBlockSize();
// All blocks except the last must be full-sized.
if (i < numblock - 1) {
assertEquals(BLOCK_SIZE,size);
}
// Every replica's stored metadata must agree with the block size.
for ( DatanodeInfo datanodeinfo : lb.getLocations()) {
final DataNode dn=cluster.getDataNode(datanodeinfo.getIpcPort());
final Block metainfo=DataNodeTestUtils.getFSDataset(dn).getStoredBlock(blk.getBlockPoolId(),blk.getBlockId());
assertEquals(size,metainfo.getNumBytes());
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier
/**
 * TC7: Corrupted replicas are present — truncate one replica's block
 * file to zero bytes on disk, then append past a block boundary and
 * verify the full file content is still correct (the corrupt replica
 * must not poison the append/read path).
 * @throws IOException an exception might be thrown
 */
@Test public void testTC7() throws Exception {
final short repl=2;
final Path p=new Path("/TC7/foo");
System.out.println("p=" + p);
// Write half a block with replication 2.
final int len1=(int)(BLOCK_SIZE / 2);
{
FSDataOutputStream out=fs.create(p,false,buffersize,repl,BLOCK_SIZE);
AppendTestUtil.write(out,0,len1);
out.close();
}
DFSTestUtil.waitReplication(fs,p,repl);
final LocatedBlocks locatedblocks=fs.dfs.getNamenode().getBlockLocations(p.toString(),0L,len1);
assertEquals(1,locatedblocks.locatedBlockCount());
final LocatedBlock lb=locatedblocks.get(0);
final ExtendedBlock blk=lb.getBlock();
assertEquals(len1,lb.getBlockSize());
DatanodeInfo[] datanodeinfos=lb.getLocations();
assertEquals(repl,datanodeinfos.length);
// Corrupt the first replica by truncating its block file on disk.
final DataNode dn=cluster.getDataNode(datanodeinfos[0].getIpcPort());
final File f=DataNodeTestUtils.getBlockFile(dn,blk.getBlockPoolId(),blk.getLocalBlock());
final RandomAccessFile raf=new RandomAccessFile(f,"rw");
try {
AppendTestUtil.LOG.info("dn=" + dn + ", blk="+ blk+ " (length="+ blk.getNumBytes()+ ")");
assertEquals(len1,raf.length());
raf.setLength(0);
}
finally {
// Close in finally so the file handle is not leaked if the
// length assertion above fails.
raf.close();
}
// Append a full block's worth of data past the corrupted replica.
final int len2=(int)BLOCK_SIZE;
{
FSDataOutputStream out=fs.append(p);
AppendTestUtil.write(out,len1,len2);
out.close();
}
// The whole file (len1 + len2 bytes) must read back correctly.
AppendTestUtil.check(fs,p,len1 + len2);
}
APIUtilityVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Test case that stops a writer after finalizing a block but
 * before calling completeFile, and then tries to recover
 * the lease from another thread.
 *
 * Mechanism: a DelayAnswer stalls the spied NameNode's complete() RPC,
 * the close runs in a background thread, a second user recovers the
 * lease, and the stalled close must then fail with "No lease on ...".
 */
@Test(timeout=60000) public void testRecoverFinalizedBlock() throws Throwable {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
try {
cluster.waitActive();
// Spy the NameNode RPC layer so we can pause complete() mid-flight.
NamenodeProtocols preSpyNN=cluster.getNameNodeRpc();
NamenodeProtocols spyNN=spy(preSpyNN);
GenericTestUtils.DelayAnswer delayer=new GenericTestUtils.DelayAnswer(LOG);
doAnswer(delayer).when(spyNN).complete(anyString(),anyString(),(ExtendedBlock)anyObject(),anyLong());
DFSClient client=new DFSClient(null,spyNN,conf,null);
file1=new Path("/testRecoverFinalized");
final OutputStream stm=client.create("/testRecoverFinalized",true);
AppendTestUtil.write(stm,0,4096);
// Run close() in a separate thread; it will block inside complete().
final AtomicReference err=new AtomicReference();
Thread t=new Thread(){
@Override public void run(){
try {
stm.close();
}
catch ( Throwable t) {
err.set(t);
}
}
}
;
t.start();
LOG.info("Waiting for close to get to latch...");
delayer.waitForCall();
LOG.info("Killing lease checker");
// Stop lease renewal so the original writer's lease can be taken.
client.getLeaseRenewer().interruptAndJoin();
FileSystem fs1=cluster.getFileSystem();
FileSystem fs2=AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf());
LOG.info("Recovering file");
recoverFile(fs2);
// Let the stalled complete() proceed; it should now be rejected.
LOG.info("Telling close to proceed.");
delayer.proceed();
LOG.info("Waiting for close to finish.");
t.join();
LOG.info("Close finished.");
// close() must have failed with a no-lease IOException.
Throwable thrownByClose=err.get();
assertNotNull(thrownByClose);
assertTrue(thrownByClose instanceof IOException);
if (!thrownByClose.getMessage().contains("No lease on /testRecoverFinalized")) throw thrownByClose;
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Test case that stops a writer after finalizing a block but
 * before calling completeFile, recovers a file from another writer,
 * starts writing from that writer, and then has the old lease holder
 * call completeFile
 *
 * Mechanism mirrors testRecoverFinalizedBlock, but after recovery the
 * new owner appends data before the original close is released; the
 * original close must then fail with a "Lease mismatch" IOException.
 */
@Test(timeout=60000) public void testCompleteOtherLeaseHoldersFile() throws Throwable {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
try {
cluster.waitActive();
// Spy the NameNode RPC layer so we can pause complete() mid-flight.
NamenodeProtocols preSpyNN=cluster.getNameNodeRpc();
NamenodeProtocols spyNN=spy(preSpyNN);
GenericTestUtils.DelayAnswer delayer=new GenericTestUtils.DelayAnswer(LOG);
doAnswer(delayer).when(spyNN).complete(anyString(),anyString(),(ExtendedBlock)anyObject(),anyLong());
DFSClient client=new DFSClient(null,spyNN,conf,null);
file1=new Path("/testCompleteOtherLease");
final OutputStream stm=client.create("/testCompleteOtherLease",true);
AppendTestUtil.write(stm,0,4096);
// Run close() in a separate thread; it will block inside complete().
final AtomicReference err=new AtomicReference();
Thread t=new Thread(){
@Override public void run(){
try {
stm.close();
}
catch ( Throwable t) {
err.set(t);
}
}
}
;
t.start();
LOG.info("Waiting for close to get to latch...");
delayer.waitForCall();
LOG.info("Killing lease checker");
// Stop lease renewal so a different user can take over the file.
client.getLeaseRenewer().interruptAndJoin();
FileSystem fs1=cluster.getFileSystem();
FileSystem fs2=AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf());
LOG.info("Recovering file");
recoverFile(fs2);
// New lease holder starts appending before the old close resumes.
LOG.info("Opening file for append from new fs");
FSDataOutputStream appenderStream=fs2.append(file1);
LOG.info("Writing some data from new appender");
AppendTestUtil.write(appenderStream,0,4096);
// Release the old writer's stalled complete(); it must be rejected.
LOG.info("Telling old close to proceed.");
delayer.proceed();
LOG.info("Waiting for close to finish.");
t.join();
LOG.info("Close finished.");
// The old close() must have failed with a lease-mismatch IOException.
Throwable thrownByClose=err.get();
assertNotNull(thrownByClose);
assertTrue(thrownByClose instanceof IOException);
if (!thrownByClose.getMessage().contains("Lease mismatch")) throw thrownByClose;
appenderStream.close();
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Regression test for HDFS-2991. Creates and appends to files
 * where blocks start/end on block boundaries.
 *
 * Counts edit-log op codes after each write+append to verify that
 * block-boundary appends log OP_ADD/OP_ADD_BLOCK/OP_CLOSE (no
 * OP_UPDATE_BLOCKS), while a mid-block append also logs one
 * OP_UPDATE_BLOCKS; finally restarts the NameNode and re-reads both
 * files to prove the edit log replays correctly.
 */
@Test public void testAppendRestart() throws Exception {
final Configuration conf=new HdfsConfiguration();
// Drop the IPC idle time so connections close promptly between ops.
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,0);
MiniDFSCluster cluster=null;
FSDataOutputStream stream=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
FileSystem fs=cluster.getFileSystem();
// The in-progress edit log file for transaction id 1.
File editLog=new File(FSImageTestUtil.getNameNodeCurrentDirs(cluster,0).get(0),NNStorage.getInProgressEditsFileName(1));
EnumMap> counts;
// Case 1: write one full block, append a second — boundary-aligned.
Path p1=new Path("/block-boundaries");
writeAndAppend(fs,p1,BLOCK_SIZE,BLOCK_SIZE);
counts=FSImageTestUtil.countEditLogOpTypes(editLog);
assertEquals(2,(int)counts.get(FSEditLogOpCodes.OP_ADD).held);
assertEquals(2,(int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
assertEquals(2,(int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);
// Case 2: half-block write then full-block append — the append
// must extend the partial block, adding one OP_UPDATE_BLOCKS.
Path p2=new Path("/not-block-boundaries");
writeAndAppend(fs,p2,BLOCK_SIZE / 2,BLOCK_SIZE);
counts=FSImageTestUtil.countEditLogOpTypes(editLog);
assertEquals(2 + 2,(int)counts.get(FSEditLogOpCodes.OP_ADD).held);
assertEquals(1,(int)counts.get(FSEditLogOpCodes.OP_UPDATE_BLOCKS).held);
assertEquals(2 + 2,(int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
assertEquals(2 + 2,(int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);
// Restart replays the edit log; both files must read back intact.
cluster.restartNameNode();
AppendTestUtil.check(fs,p1,2 * BLOCK_SIZE);
AppendTestUtil.check(fs,p2,3 * BLOCK_SIZE / 2);
}
finally {
IOUtils.closeStream(stream);
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the case that a replica is reported corrupt while it is not
 * in blocksMap. Make sure that ArrayIndexOutOfBounds does not thrown.
 * See Hadoop-4351.
 *
 * A block is marked corrupt against a freshly-added third datanode
 * (which never held it); subsequent open/delete of the file must not
 * trip an ArrayIndexOutOfBoundsException in the block manager.
 */
@Test public void testArrayOutOfBoundsException() throws Exception {
MiniDFSCluster cluster=null;
try {
Configuration conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
final Path FILE_PATH=new Path("/tmp.txt");
final long FILE_LEN=1L;
DFSTestUtil.createFile(fs,FILE_PATH,FILE_LEN,(short)2,1L);
// Locate the block file on disk; it may live in either of the
// first datanode's two storage directories.
final String bpid=cluster.getNamesystem().getBlockPoolId();
File storageDir=cluster.getInstanceStorageDir(0,0);
File dataDir=MiniDFSCluster.getFinalizedDir(storageDir,bpid);
assertTrue("Data directory does not exist",dataDir.exists());
ExtendedBlock blk=getBlock(bpid,dataDir);
if (blk == null) {
storageDir=cluster.getInstanceStorageDir(0,1);
dataDir=MiniDFSCluster.getFinalizedDir(storageDir,bpid);
blk=getBlock(bpid,dataDir);
}
assertFalse("Data directory does not contain any blocks or there was an " + "IO error",blk == null);
// Start a third datanode that has never held this block.
cluster.startDataNodes(conf,1,true,null,null);
ArrayList datanodes=cluster.getDataNodes();
assertEquals(datanodes.size(),3);
DataNode dataNode=datanodes.get(2);
DatanodeRegistration dnR=DataNodeTestUtils.getDNRegistrationForBP(dataNode,blk.getBlockPoolId());
// Mark the block corrupt on the new node under the namesystem
// write lock, mimicking a corrupt-replica report.
FSNamesystem ns=cluster.getNamesystem();
ns.writeLock();
try {
cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(blk,new DatanodeInfo(dnR),"TEST","STORAGE_ID");
}
finally {
ns.writeUnlock();
}
// Neither open nor delete should throw ArrayIndexOutOfBounds.
fs.open(FILE_PATH);
fs.delete(FILE_PATH,false);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Checks that DFS handles corrupted blocks properly: create 20 test
 * files on a 3-datanode cluster, delete every block file on one
 * datanode, and verify the files are still readable from the
 * remaining replicas.
 */
@Test public void testFileCorruption() throws Exception {
  MiniDFSCluster miniCluster = null;
  DFSTestUtil util = new DFSTestUtil.Builder()
      .setName("TestFileCorruption").setNumFiles(20).build();
  try {
    Configuration config = new HdfsConfiguration();
    miniCluster = new MiniDFSCluster.Builder(config).numDataNodes(3).build();
    FileSystem fileSys = miniCluster.getFileSystem();
    util.createFiles(fileSys, "/srcdat");
    // Locate the third datanode's finalized-block directory.
    File storageDir = miniCluster.getInstanceStorageDir(2, 0);
    String blockPoolId = miniCluster.getNamesystem().getBlockPoolId();
    File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, blockPoolId);
    assertTrue("data directory does not exist", data_dir.exists());
    File[] blockFiles = data_dir.listFiles();
    assertTrue("Blocks do not exist in data-dir",
        (blockFiles != null) && (blockFiles.length > 0));
    // Wipe every block file (skip metadata and other files).
    for (File blockFile : blockFiles) {
      if (!blockFile.getName().startsWith("blk_")) {
        continue;
      }
      System.out.println("Deliberately removing file " + blockFile.getName());
      assertTrue("Cannot remove file.", blockFile.delete());
    }
    // The other two replicas must keep every file readable.
    assertTrue("Corrupted replicas not handled properly.",
        util.checkFiles(fileSys, "/srcdat"));
    util.cleanup(fileSys, "/srcdat");
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Create a file, write something, hflush but not close.
 * Then change lease period and wait for lease recovery.
 * Finally, read the block directly from each Datanode and verify the content.
 */
@Test public void testLeaseExpireHardLimit() throws Exception {
System.out.println("testLeaseExpireHardLimit start");
final long leasePeriod=1000;
final int DATANODE_NUM=3;
Configuration conf=new HdfsConfiguration();
// Fast heartbeats so dead-writer detection happens within the test.
conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
DistributedFileSystem dfs=null;
try {
cluster.waitActive();
dfs=cluster.getFileSystem();
final String f=DIR + "foo";
final Path fpath=new Path(f);
// hflush but never close, leaving the lease held.
HdfsDataOutputStream out=create(dfs,fpath,DATANODE_NUM);
out.write("something".getBytes());
out.hflush();
int actualRepl=out.getCurrentBlockReplication();
assertTrue(f + " should be replicated to " + DATANODE_NUM+ " datanodes.",actualRepl == DATANODE_NUM);
// Shrink both lease limits and wait well past them so the NameNode
// hard-limit recovery closes the abandoned file.
cluster.setLeasePeriod(leasePeriod,leasePeriod);
try {
Thread.sleep(5 * leasePeriod);
}
catch ( InterruptedException e) {
}
LocatedBlocks locations=dfs.dfs.getNamenode().getBlockLocations(f,0,Long.MAX_VALUE);
assertEquals(1,locations.locatedBlockCount());
LocatedBlock locatedblock=locations.getLocatedBlocks().get(0);
// Read the block file directly on each datanode and verify content.
int successcount=0;
for ( DatanodeInfo datanodeinfo : locatedblock.getLocations()) {
DataNode datanode=cluster.getDataNode(datanodeinfo.getIpcPort());
ExtendedBlock blk=locatedblock.getBlock();
Block b=DataNodeTestUtils.getFSDataset(datanode).getStoredBlock(blk.getBlockPoolId(),blk.getBlockId());
final File blockfile=DataNodeTestUtils.getFile(datanode,blk.getBlockPoolId(),b.getBlockId());
System.out.println("blockfile=" + blockfile);
if (blockfile != null) {
// Close in finally so the reader is not leaked if the content
// assertion fails. NOTE(review): FileReader uses the platform
// charset, matching the platform-charset getBytes() above.
BufferedReader in=new BufferedReader(new FileReader(blockfile));
try {
assertEquals("something",in.readLine());
}
finally {
in.close();
}
successcount++;
}
}
System.out.println("successcount=" + successcount);
// At least one replica must hold the recovered data.
assertTrue(successcount > 0);
}
finally {
IOUtils.closeStream(dfs);
cluster.shutdown();
}
System.out.println("testLeaseExpireHardLimit successful");
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test that file leases are persisted across namenode restarts.
 *
 * Creates several files under active client leases, renames one file and an
 * ancestor directory while the leases are held, restarts the NameNode twice
 * without reformatting, then completes the writes through the still-open
 * streams and verifies the resulting block counts via getBlockLocations.
 */
@Test public void testFileCreationNamenodeRestart() throws IOException {
Configuration conf=new HdfsConfiguration();
final int MAX_IDLE_TIME=2000;
// Short idle time so client connections drop quickly while the NN is down.
conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME);
conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY,1);
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
DistributedFileSystem fs=null;
try {
cluster.waitActive();
fs=cluster.getFileSystem();
final int nnport=cluster.getNameNodePort();
// File 1: created, written and hflushed so blocks and lease exist on the NN.
Path file1=new Path("/filestatus.dat");
HdfsDataOutputStream stm=create(fs,file1,1);
System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file1);
assertEquals(file1 + " should be replicated to 1 datanode.",1,stm.getCurrentBlockReplication());
writeFile(stm,numBlocks * blockSize);
stm.hflush();
assertEquals(file1 + " should still be replicated to 1 datanode.",1,stm.getCurrentBlockReplication());
// Rename file 1 while its lease is still held by this client.
Path fileRenamed=new Path("/filestatusRenamed.dat");
fs.rename(file1,fileRenamed);
System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file1 + " to "+ fileRenamed);
file1=fileRenamed;
// Files 2-4: opened but left under construction across the restarts.
Path file2=new Path("/filestatus2.dat");
FSDataOutputStream stm2=createFile(fs,file2,1);
System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file2);
Path file3=new Path("/user/home/fullpath.dat");
FSDataOutputStream stm3=createFile(fs,file3,1);
System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file3);
Path file4=new Path("/user/home/fullpath4.dat");
FSDataOutputStream stm4=createFile(fs,file4,1);
System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file4);
// Rename an ancestor directory, implicitly moving files 3 and 4.
fs.mkdirs(new Path("/bin"));
fs.rename(new Path("/user/home"),new Path("/bin"));
Path file3new=new Path("/bin/home/fullpath.dat");
System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file3 + " to "+ file3new);
Path file4new=new Path("/bin/home/fullpath4.dat");
System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file4 + " to "+ file4new);
// Restart cycle 1: shut down, wait past the idle time, restart on the
// same port without reformatting so the persisted leases are reloaded.
cluster.shutdown();
try {
Thread.sleep(2 * MAX_IDLE_TIME);
}
catch ( InterruptedException e) {
}
cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
cluster.waitActive();
// Restart cycle 2: same thing again to exercise repeated recovery.
cluster.shutdown();
try {
Thread.sleep(5000);
}
catch ( InterruptedException e) {
}
cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
cluster.waitActive();
fs=cluster.getFileSystem();
// Point the still-open streams at the post-rename paths so their lease
// renewal matches what the restarted NN recovered.
DFSOutputStream dfstream=(DFSOutputStream)(stm.getWrappedStream());
dfstream.setTestFilename(file1.toString());
dfstream=(DFSOutputStream)(stm3.getWrappedStream());
dfstream.setTestFilename(file3new.toString());
dfstream=(DFSOutputStream)(stm4.getWrappedStream());
dfstream.setTestFilename(file4new.toString());
// Finish the writes through the original streams and close everything.
byte[] buffer=AppendTestUtil.randomBytes(seed,1);
stm.write(buffer);
stm.close();
stm2.write(buffer);
stm2.close();
stm3.close();
stm4.close();
// Verify the surviving block counts.
// NOTE(review): the expected count of 3 presumes numBlocks == 2 (written
// above) plus one block for the final 1-byte write -- confirm against the
// class constants.
DFSClient client=fs.dfs;
LocatedBlocks locations=client.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
System.out.println("locations = " + locations.locatedBlockCount());
assertTrue("Error blocks were not cleaned up for file " + file1,locations.locatedBlockCount() == 3);
locations=client.getNamenode().getBlockLocations(file2.toString(),0,Long.MAX_VALUE);
System.out.println("locations = " + locations.locatedBlockCount());
assertTrue("Error blocks were not cleaned up for file " + file2,locations.locatedBlockCount() == 1);
}
finally {
IOUtils.closeStream(fs);
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test that {@link FileSystem#deleteOnExit(Path)} removes the registered
 * paths when the file system handle is closed, for both HDFS and the
 * local file system.
 */
@Test public void testDeleteOnExit() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  FileSystem localfs = FileSystem.getLocal(conf);
  try {
    // Create two HDFS files and one local file.
    Path file1 = new Path("filestatus.dat");
    Path file2 = new Path("filestatus2.dat");
    Path file3 = new Path("filestatus3.dat");
    FSDataOutputStream stm1 = createFile(fs, file1, 1);
    FSDataOutputStream stm2 = createFile(fs, file2, 1);
    FSDataOutputStream stm3 = createFile(localfs, file3, 1);
    System.out.println("DeleteOnExit: Created files.");
    // file2 is left empty on purpose: deleteOnExit must remove it anyway.
    writeFile(stm1);
    writeFile(stm3);
    stm1.close();
    stm2.close();
    stm3.close();
    // Register all three for deletion when their file system is closed.
    fs.deleteOnExit(file1);
    fs.deleteOnExit(file2);
    localfs.deleteOnExit(file3);
    // Closing the handles triggers the deferred deletes.
    fs.close();
    localfs.close();
    fs = null;
    localfs = null;
    // Re-acquire handles and verify the files are gone.
    fs = cluster.getFileSystem();
    localfs = FileSystem.getLocal(conf);
    // assertFalse(cond) replaces the original assertTrue(!cond) anti-idiom;
    // messages also had typos ("inspite of deletOnExit").
    assertFalse(file1 + " still exists in spite of deleteOnExit set.", fs.exists(file1));
    assertFalse(file2 + " still exists in spite of deleteOnExit set.", fs.exists(file2));
    assertFalse(file3 + " still exists in spite of deleteOnExit set.", localfs.exists(file3));
    System.out.println("DeleteOnExit successful.");
  } finally {
    IOUtils.closeStream(fs);
    IOUtils.closeStream(localfs);
    cluster.shutdown();
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests {@code NamenodeProtocol#getBlocks}: a 2-block file is created, then
 * blocks are requested from a datanode by minimum total size, and invalid
 * sizes / unknown datanodes are verified to be rejected.
 */
@Test public void testGetBlocks() throws Exception {
  final Configuration CONF = new HdfsConfiguration();
  final short REPLICATION_FACTOR = (short) 2;
  final int DEFAULT_BLOCK_SIZE = 1024;
  CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(REPLICATION_FACTOR).build();
  try {
    cluster.waitActive();
    // Create a 2-block file and poll until both blocks are fully replicated.
    long fileLen = 2 * DEFAULT_BLOCK_SIZE;
    DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/tmp.txt"), fileLen, REPLICATION_FACTOR, 0L);
    // Typed list: the original's raw List could not even compile the
    // .get(i).getLocations() call below.
    List<LocatedBlock> locatedBlocks;
    DatanodeInfo[] dataNodes = null;
    boolean notWritten;
    do {
      final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF), CONF);
      locatedBlocks = dfsclient.getNamenode().getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
      // Expected value first: the original had expected/actual reversed,
      // which produces misleading JUnit failure messages.
      assertEquals(2, locatedBlocks.size());
      notWritten = false;
      for (int i = 0; i < 2; i++) {
        dataNodes = locatedBlocks.get(i).getLocations();
        if (dataNodes.length != REPLICATION_FACTOR) {
          notWritten = true;
          try {
            Thread.sleep(10);
          } catch (InterruptedException e) {
          }
          break;
        }
      }
    } while (notWritten);
    // Ask the namenode for blocks of the first datanode, by total size.
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    NamenodeProtocol namenode = NameNodeProxies.createProxy(CONF, NameNode.getUri(addr), NamenodeProtocol.class).getProxy();
    BlockWithLocations[] locs;
    // Size covering the whole file returns both blocks, each with 2 replicas.
    locs = namenode.getBlocks(dataNodes[0], fileLen).getBlocks();
    assertEquals(2, locs.length);
    assertEquals(2, locs[0].getStorageIDs().length);
    assertEquals(2, locs[1].getStorageIDs().length);
    // One block's worth of size returns a single block.
    locs = namenode.getBlocks(dataNodes[0], DEFAULT_BLOCK_SIZE).getBlocks();
    assertEquals(1, locs.length);
    assertEquals(2, locs[0].getStorageIDs().length);
    // Any positive size, however small, still returns at least one block.
    locs = namenode.getBlocks(dataNodes[0], 1).getBlocks();
    assertEquals(1, locs.length);
    assertEquals(2, locs[0].getStorageIDs().length);
    // Non-positive sizes and unknown datanodes must be rejected.
    getBlocksWithException(namenode, dataNodes[0], 0);
    getBlocksWithException(namenode, dataNodes[0], -1);
    DatanodeInfo info = DFSTestUtil.getDatanodeInfo("1.2.3.4");
    getBlocksWithException(namenode, info, 2);
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier IterativeVerifier EqualityVerifier
/**
 * Verifies that {@code Block} hashing/equality ignore the generation stamp:
 * a key built with GRANDFATHER_GENERATION_STAMP must still find the entry
 * that was stored under the real stamp.
 */
@Test public void testBlockKey(){
  // Typed map; the original used raw Map/HashMap.
  Map<Block, Long> map = new HashMap<Block, Long>();
  final Random RAN = new Random();
  final long seed = RAN.nextLong();
  // Log the seed so a failing run can be reproduced.
  System.out.println("seed=" + seed);
  RAN.setSeed(seed);
  long[] blkids = new long[10];
  for (int i = 0; i < blkids.length; i++) {
    blkids[i] = 1000L + RAN.nextInt(100000);
    // The block's generation stamp is set to its own id here.
    map.put(new Block(blkids[i], 0, blkids[i]), blkids[i]);
  }
  System.out.println("map=" + map.toString().replace(",", "\n "));
  for (int i = 0; i < blkids.length; i++) {
    // Same block id, different generation stamp: lookup must still succeed.
    Block b = new Block(blkids[i], 0, GenerationStamp.GRANDFATHER_GENERATION_STAMP);
    Long v = map.get(b);
    System.out.println(b + " => " + v);
    assertEquals(blkids[i], v.longValue());
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test if the datanodes returned by
 * {@link ClientProtocol#getBlockLocations(String,long,long)} are correct
 * when stale node checking is enabled. Also tests the scenario where
 * 1) stale node checking is enabled, 2) a write is in progress, and
 * 3) a datanode becomes stale, all at the same time.
 * @throws Exception
 */
@Test public void testReadSelectNonStaleDatanode() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
  // 30 minutes: long enough that only the explicit setLastUpdate() calls
  // below can mark a node stale.
  long staleInterval = 30 * 1000 * 60;
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, staleInterval);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).racks(racks).build();
  cluster.waitActive();
  InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
  DFSClient client = new DFSClient(addr, conf);
  // Typed list; the original used a raw List.
  List<DatanodeDescriptor> nodeInfoList = cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanodeListForReport(DatanodeReportType.LIVE);
  assertEquals("Unexpected number of datanodes", numDatanodes, nodeInfoList.size());
  FileSystem fileSys = cluster.getFileSystem();
  FSDataOutputStream stm = null;
  try {
    // Write 1.5 blocks so the file has both a complete block and a final
    // under-construction block.
    final Path fileName = new Path("/file1");
    stm = fileSys.create(fileName, true, fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), (short) 3, blockSize);
    stm.write(new byte[(blockSize * 3) / 2]);
    stm.hflush();
    LocatedBlocks blocks = client.getNamenode().getBlockLocations(fileName.toString(), 0, blockSize);
    DatanodeInfo[] nodes = blocks.get(0).getLocations();
    // Expected value first: the original had expected/actual reversed in
    // every assertEquals of this method.
    assertEquals(3, nodes.length);
    DataNode staleNode = null;
    DatanodeDescriptor staleNodeInfo = null;
    // Stop heartbeats from the first replica's node and age its last-update
    // time past the stale interval so the NN considers it stale.
    staleNode = this.stopDataNodeHeartbeat(cluster, nodes[0].getHostName());
    assertNotNull(staleNode);
    staleNodeInfo = cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(staleNode.getDatanodeId());
    staleNodeInfo.setLastUpdate(Time.now() - staleInterval - 1);
    LocatedBlocks blocksAfterStale = client.getNamenode().getBlockLocations(fileName.toString(), 0, blockSize);
    DatanodeInfo[] nodesAfterStale = blocksAfterStale.get(0).getLocations();
    assertEquals(3, nodesAfterStale.length);
    // The stale node must have been moved to the end of the location list.
    assertEquals(nodes[0].getHostName(), nodesAfterStale[2].getHostName());
    // Restore the node, then repeat for the last (under-construction) block.
    DataNodeTestUtils.setHeartbeatsDisabledForTests(staleNode, false);
    staleNodeInfo.setLastUpdate(Time.now());
    LocatedBlock lastBlock = client.getLocatedBlocks(fileName.toString(), 0, Long.MAX_VALUE).getLastLocatedBlock();
    nodes = lastBlock.getLocations();
    assertEquals(3, nodes.length);
    staleNode = this.stopDataNodeHeartbeat(cluster, nodes[0].getHostName());
    assertNotNull(staleNode);
    cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(staleNode.getDatanodeId()).setLastUpdate(Time.now() - staleInterval - 1);
    LocatedBlock lastBlockAfterStale = client.getLocatedBlocks(fileName.toString(), 0, Long.MAX_VALUE).getLastLocatedBlock();
    nodesAfterStale = lastBlockAfterStale.getLocations();
    assertEquals(3, nodesAfterStale.length);
    assertEquals(nodes[0].getHostName(), nodesAfterStale[2].getHostName());
  } finally {
    if (stm != null) {
      stm.close();
    }
    if (client != null) {
      client.close();
    }
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verify datanode port usage: the DataNode must fail to start while any of
 * its ports collides with one already bound by the NameNode, and start
 * successfully once every port is distinct.
 */
@Test public void testDataNodePorts() throws Exception {
  NameNode nn = null;
  try {
    nn = startNameNode();
    Configuration dnConf = new HdfsConfiguration(config);
    dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, new File(hdfsDir, "data").getPath());
    // Attempt 1: data-transfer address collides with the NameNode RPC port.
    dnConf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, FileSystem.getDefaultUri(config).getAuthority());
    dnConf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, THIS_HOST);
    assertFalse(canStartDataNode(dnConf));
    // Attempt 2: HTTP address collides with the NameNode HTTP port.
    dnConf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, THIS_HOST);
    dnConf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
    assertFalse(canStartDataNode(dnConf));
    // Attempt 3: all three ports free -- the DataNode must come up.
    dnConf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, THIS_HOST);
    dnConf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, THIS_HOST);
    dnConf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, THIS_HOST);
    assertTrue(canStartDataNode(dnConf));
  } finally {
    stopNameNode(nn);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verify BackupNode port usage: startup must fail while the backup HTTP
 * address collides with the active NameNode's, and succeed on a free port.
 */
@Test public void testBackupNodePorts() throws Exception {
  NameNode nn = null;
  try {
    nn = startNameNode();
    Configuration backupConf = new HdfsConfiguration(config);
    backupConf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, THIS_HOST);
    // First attempt reuses the active NameNode's HTTP address and must fail.
    backupConf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, backupConf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
    LOG.info("= Starting 1 on: " + backupConf.get(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
    assertFalse("Backup started on same port as Namenode", canStartBackupNode(backupConf));
    // Second attempt binds to a free address and must succeed.
    backupConf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, THIS_HOST);
    LOG.info("= Starting 2 on: " + backupConf.get(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
    assertTrue("Backup Namenode should've started", canStartBackupNode(backupConf));
  } finally {
    stopNameNode(nn);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verify secondary namenode port usage: it must refuse to start on the
 * NameNode's HTTP port and start cleanly on a free one.
 */
@Test public void testSecondaryNodePorts() throws Exception {
  NameNode nn = null;
  try {
    nn = startNameNode();
    Configuration snConf = new HdfsConfiguration(config);
    // Colliding with the NameNode HTTP address must prevent startup.
    snConf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
    LOG.info("= Starting 1 on: " + snConf.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY));
    assertFalse(canStartSecondaryNode(snConf));
    // A free address must allow the secondary to start.
    snConf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, THIS_HOST);
    LOG.info("= Starting 2 on: " + snConf.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY));
    assertTrue(canStartSecondaryNode(snConf));
  } finally {
    stopNameNode(nn);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that we can set and clear quotas via {@link HdfsAdmin}.
 */
@Test public void testHdfsAdminSetQuota() throws Exception {
  HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  FileSystem fs = null;
  try {
    fs = FileSystem.get(conf);
    assertTrue(fs.mkdirs(TEST_PATH));
    // Fresh directory: neither quota is set (-1 means unset).
    checkQuotas(fs, -1, -1);
    dfsAdmin.setSpaceQuota(TEST_PATH, 10);
    checkQuotas(fs, -1, 10);
    dfsAdmin.setQuota(TEST_PATH, 10);
    checkQuotas(fs, 10, 10);
    dfsAdmin.clearSpaceQuota(TEST_PATH);
    checkQuotas(fs, 10, -1);
    dfsAdmin.clearQuota(TEST_PATH);
    checkQuotas(fs, -1, -1);
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}

/** Asserts the namespace and space quota currently reported for TEST_PATH. */
private void checkQuotas(FileSystem fs, long nsQuota, long spaceQuota) throws IOException {
  assertEquals(nsQuota, fs.getContentSummary(TEST_PATH).getQuota());
  assertEquals(spaceQuota, fs.getContentSummary(TEST_PATH).getSpaceQuota());
}
APIUtilityVerifier BooleanVerifier
/** ClientDatanodeProtocol probing: NN address lacks it, DN address has it. */
@Test public void testClientDatanodeProtocol() throws IOException {
  // Against the NameNode address the datanode-only method is unsupported...
  ClientDatanodeProtocolTranslatorPB nnProbe = new ClientDatanodeProtocolTranslatorPB(nnAddress, UserGroupInformation.getCurrentUser(), conf, NetUtils.getDefaultSocketFactory(conf));
  assertFalse(nnProbe.isMethodSupported("refreshNamenodes"));
  // ...while the DataNode endpoint advertises it.
  ClientDatanodeProtocolTranslatorPB dnProbe = new ClientDatanodeProtocolTranslatorPB(dnAddress, UserGroupInformation.getCurrentUser(), conf, NetUtils.getDefaultSocketFactory(conf));
  assertTrue(dnProbe.isMethodSupported("refreshNamenodes"));
}
APIUtilityVerifier BooleanVerifier
// NOTE(review): the method name is missing a 't' ("tesJournalProtocol");
// kept unchanged to preserve the externally visible test name.
@Test public void tesJournalProtocol() throws IOException {
  Object proxy = NameNodeProxies.createNonHAProxy(conf, nnAddress, JournalProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy();
  JournalProtocolTranslatorPB translator = (JournalProtocolTranslatorPB) proxy;
  // The endpoint at nnAddress does not support JournalProtocol's method.
  assertFalse(translator.isMethodSupported("startLogSegment"));
}
APIUtilityVerifier BooleanVerifier
/** RefreshCallQueueProtocol must be supported by the namenode endpoint. */
@Test public void testRefreshCallQueueProtocol() throws IOException {
  Object proxy = NameNodeProxies.createNonHAProxy(conf, nnAddress, RefreshCallQueueProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy();
  RefreshCallQueueProtocolClientSideTranslatorPB translator = (RefreshCallQueueProtocolClientSideTranslatorPB) proxy;
  assertTrue(translator.isMethodSupported("refreshCallQueue"));
}
APIUtilityVerifier BooleanVerifier
/** InterDatanodeProtocol probing: absent on the NN, present on the DN. */
@Test public void testInterDatanodeProtocol() throws IOException {
  UserGroupInformation user = UserGroupInformation.getCurrentUser();
  // The NameNode address does not serve the inter-datanode method...
  InterDatanodeProtocolTranslatorPB nnProbe = new InterDatanodeProtocolTranslatorPB(nnAddress, user, conf, NetUtils.getDefaultSocketFactory(conf), 0);
  assertFalse(nnProbe.isMethodSupported("initReplicaRecovery"));
  // ...but the DataNode endpoint does.
  InterDatanodeProtocolTranslatorPB dnProbe = new InterDatanodeProtocolTranslatorPB(dnAddress, user, conf, NetUtils.getDefaultSocketFactory(conf), 0);
  assertTrue(dnProbe.isMethodSupported("initReplicaRecovery"));
}
APIUtilityVerifier BooleanVerifier
/** RefreshUserMappingsProtocol must be supported by the namenode endpoint. */
@Test public void testRefreshUserMappingsProtocol() throws IOException {
  Object proxy = NameNodeProxies.createNonHAProxy(conf, nnAddress, RefreshUserMappingsProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy();
  RefreshUserMappingsProtocolClientSideTranslatorPB translator = (RefreshUserMappingsProtocolClientSideTranslatorPB) proxy;
  assertTrue(translator.isMethodSupported("refreshUserToGroupsMappings"));
}
APIUtilityVerifier BooleanVerifier
/** RefreshAuthorizationPolicyProtocol must be supported by the namenode. */
@Test public void testRefreshAuthorizationPolicyProtocol() throws IOException {
  Object proxy = NameNodeProxies.createNonHAProxy(conf, nnAddress, RefreshAuthorizationPolicyProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy();
  RefreshAuthorizationPolicyProtocolClientSideTranslatorPB translator = (RefreshAuthorizationPolicyProtocolClientSideTranslatorPB) proxy;
  assertTrue(translator.isMethodSupported("refreshServiceAcl"));
}
APIUtilityVerifier BooleanVerifier
/** NamenodeProtocol probing: a real method is supported, a bogus one is not. */
@Test public void testNamenodeProtocol() throws IOException {
  NamenodeProtocol np = NameNodeProxies.createNonHAProxy(conf, nnAddress, NamenodeProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy();
  long version = RPC.getProtocolVersion(NamenodeProtocolPB.class);
  // A genuine protocol method is reported as supported...
  assertTrue(RpcClientUtil.isMethodSupported(np, NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER, version, "rollEditLog"));
  // ...while a made-up method name is not.
  assertFalse(RpcClientUtil.isMethodSupported(np, NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER, version, "bogusMethod"));
}
APIUtilityVerifier BooleanVerifier
/** GetUserMappingsProtocol must be supported by the namenode endpoint. */
@Test public void testGetUserMappingsProtocol() throws IOException {
  Object proxy = NameNodeProxies.createNonHAProxy(conf, nnAddress, GetUserMappingsProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy();
  GetUserMappingsProtocolClientSideTranslatorPB translator = (GetUserMappingsProtocolClientSideTranslatorPB) proxy;
  assertTrue(translator.isMethodSupported("getGroupsForUser"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the LeaseRenewer factory: DFSClients created under the same UGI
 * share one renewer instance, while clients under different UGIs get
 * distinct renewers.
 */
@SuppressWarnings("unchecked") @Test public void testFactory() throws Exception {
final String[] groups=new String[]{"supergroup"};
final UserGroupInformation[] ugi=new UserGroupInformation[3];
// Three distinct test users, all in the same group.
for (int i=0; i < ugi.length; i++) {
ugi[i]=UserGroupInformation.createUserForTesting("user" + i,groups);
}
// Stub mcp (presumably a mocked client protocol -- declared elsewhere in
// this class) so getFileInfo/create return a plausible HdfsFileStatus.
Mockito.doReturn(new HdfsFileStatus(0,false,1,1024,0,0,new FsPermission((short)777),"owner","group",new byte[0],new byte[0],1010,0,null)).when(mcp).getFileInfo(anyString());
Mockito.doReturn(new HdfsFileStatus(0,false,1,1024,0,0,new FsPermission((short)777),"owner","group",new byte[0],new byte[0],1010,0,null)).when(mcp).create(anyString(),(FsPermission)anyObject(),anyString(),(EnumSetWritable)anyObject(),anyBoolean(),anyShort(),anyLong(),(List)anyList());
final Configuration conf=new Configuration();
// Two clients under ugi[0] must share a single LeaseRenewer.
final DFSClient c1=createDFSClientAs(ugi[0],conf);
FSDataOutputStream out1=createFsOut(c1,"/out1");
final DFSClient c2=createDFSClientAs(ugi[0],conf);
FSDataOutputStream out2=createFsOut(c2,"/out2");
Assert.assertEquals(c1.getLeaseRenewer(),c2.getLeaseRenewer());
// A client under a different UGI gets a different renewer...
final DFSClient c3=createDFSClientAs(ugi[1],conf);
FSDataOutputStream out3=createFsOut(c3,"/out3");
Assert.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());
// ...which is itself shared by another client under the same UGI.
final DFSClient c4=createDFSClientAs(ugi[1],conf);
FSDataOutputStream out4=createFsOut(c4,"/out4");
Assert.assertEquals(c3.getLeaseRenewer(),c4.getLeaseRenewer());
// Third UGI: its renewer differs from both of the earlier ones.
final DFSClient c5=createDFSClientAs(ugi[2],conf);
FSDataOutputStream out5=createFsOut(c5,"/out5");
Assert.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
Assert.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Verifies client behavior when lease renewal fails: writes keep working
 * within the soft-limit window, a renewal failure past the hard limit kills
 * the client's open output streams, and reads plus fresh writes work again
 * once renewals start succeeding.
 */
@Test public void testLeaseAbort() throws Exception {
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
cluster.waitActive();
// Spy on the NN RPC layer so renewLease() can be made to fail on demand.
NamenodeProtocols preSpyNN=cluster.getNameNodeRpc();
NamenodeProtocols spyNN=spy(preSpyNN);
DFSClient dfs=new DFSClient(null,spyNN,conf,null);
byte[] buf=new byte[1024];
// File "c" is written, closed and reopened for read; "d" stays open for write.
FSDataOutputStream c_out=createFsOut(dfs,dirString + "c");
c_out.write(buf,0,1024);
c_out.close();
DFSInputStream c_in=dfs.open(dirString + "c");
FSDataOutputStream d_out=createFsOut(dfs,dirString + "d");
// From now on every lease renewal fails with InvalidToken.
doThrow(new RemoteException(InvalidToken.class.getName(),"Your token is worthless")).when(spyNN).renewLease(anyString());
LeaseRenewer originalRenewer=dfs.getLeaseRenewer();
// Pretend the last successful renewal happened just past the soft limit.
dfs.lastLeaseRenewal=Time.now() - HdfsConstants.LEASE_SOFTLIMIT_PERIOD - 1000;
try {
dfs.renewLease();
}
catch ( IOException e) {
}
// Soft-limit expiry alone must not break an already-open writer.
try {
d_out.write(buf,0,1024);
LOG.info("Write worked beyond the soft limit as expected.");
}
catch ( IOException e) {
Assert.fail("Write failed.");
}
// Push past the hard limit; the failed renewal now aborts open streams.
dfs.lastLeaseRenewal=Time.now() - HdfsConstants.LEASE_HARDLIMIT_PERIOD - 1000;
dfs.renewLease();
try {
d_out.write(buf,0,1024);
d_out.close();
Assert.fail("Write did not fail even after the fatal lease renewal failure");
}
catch ( IOException e) {
LOG.info("Write failed as expected. ",e);
}
// Give the renewer a moment to drain after the abort before checking it.
Thread.sleep(1000);
Assert.assertTrue(originalRenewer.isEmpty());
// Let renewals succeed again: reads and fresh writes must both work.
doNothing().when(spyNN).renewLease(anyString());
try {
int num=c_in.read(buf,0,1);
if (num != 1) {
Assert.fail("Failed to read 1 byte");
}
c_in.close();
}
catch ( IOException e) {
LOG.error("Read failed with ",e);
Assert.fail("Read after lease renewal failure failed");
}
try {
c_out=createFsOut(dfs,dirString + "c");
c_out.write(buf,0,1024);
c_out.close();
}
catch ( IOException e) {
LOG.error("Write failed with ",e);
Assert.fail("Write failed");
}
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * The following test first creates a file with a few blocks, triggers lease
 * recovery via append, and verifies that after block synchronization every
 * replica of the last block agrees on block id, length and generation
 * stamp. It then checks that lease recovery does not run while the
 * namenode is in safemode.
 */
@Test public void testBlockSynchronization() throws Exception {
  final int ORG_FILE_SIZE = 3000;
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
    cluster.waitActive();
    DistributedFileSystem dfs = cluster.getFileSystem();
    // Create a replicated multi-block file and wait for full replication.
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
    assertTrue(dfs.exists(filepath));
    DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);
    LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(dfs.dfs.getNamenode(), filestr);
    DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
    assertEquals(REPLICATION_NUM, datanodeinfos.length);
    // Resolve the DataNode instance backing each replica of the last block.
    DataNode[] datanodes = new DataNode[REPLICATION_NUM];
    for (int i = 0; i < REPLICATION_NUM; i++) {
      datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
      // assertNotNull gives a clearer failure than assertTrue(x != null).
      assertNotNull(datanodes[i]);
    }
    ExtendedBlock lastblock = locatedblock.getBlock();
    DataNode.LOG.info("newblocks=" + lastblock);
    for (int i = 0; i < REPLICATION_NUM; i++) {
      checkMetaInfo(lastblock, datanodes[i]);
    }
    // Trigger lease recovery via append and wait for synchronization.
    DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
    cluster.getNameNodeRpc().append(filestr, dfs.dfs.clientName);
    waitLeaseRecovery(cluster);
    // After recovery every replica must agree on id, length and gen stamp.
    Block[] updatedmetainfo = new Block[REPLICATION_NUM];
    long oldSize = lastblock.getNumBytes();
    lastblock = TestInterDatanodeProtocol.getLastLocatedBlock(dfs.dfs.getNamenode(), filestr).getBlock();
    long currentGS = lastblock.getGenerationStamp();
    for (int i = 0; i < REPLICATION_NUM; i++) {
      updatedmetainfo[i] = DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(lastblock.getBlockPoolId(), lastblock.getBlockId());
      assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
      assertEquals(oldSize, updatedmetainfo[i].getNumBytes());
      assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
    }
    // Lease recovery must not run while the namenode is in safemode, so the
    // lease for the new file must still be present afterwards.
    System.out.println("Testing that lease recovery cannot happen during safemode.");
    filestr = "/foo.safemode";
    filepath = new Path(filestr);
    dfs.create(filepath, (short) 1);
    cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
    assertTrue(dfs.dfs.exists(filestr));
    DFSTestUtil.waitReplication(dfs, filepath, (short) 1);
    waitLeaseRecovery(cluster);
    LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
    // assertEquals reports both values on failure, unlike assertTrue(x == 1).
    assertEquals("Unexpected lease count; recovery should not run in safemode", 1, lm.countLease());
    cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Block Recovery when the meta file does not have CRCs for all chunks in
 * the block file: lease recovery must still close the file.
 */
@Test public void testBlockRecoveryWithLessMetafile() throws Exception {
  Configuration conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY, UserGroupInformation.getCurrentUser().getShortUserName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  // The original leaked the cluster (and the second FS handle); always
  // release them so later tests are not starved of ports/threads.
  try {
    Path file = new Path("/testRecoveryFile");
    DistributedFileSystem dfs = cluster.getFileSystem();
    FSDataOutputStream out = dfs.create(file);
    // Write ~2MB then abort the stream so the file stays under construction.
    int count = 0;
    while (count < 2 * 1024 * 1024) {
      out.writeBytes("Data");
      count += 4;
    }
    out.hsync();
    ((DFSOutputStream) out.getWrappedStream()).abort();
    LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(file.toString(), 0, count);
    ExtendedBlock block = locations.get(0).getBlock();
    DataNode dn = cluster.getDataNodes().get(0);
    BlockLocalPathInfo localPathInfo = dn.getBlockLocalPathInfo(block, null);
    File metafile = new File(localPathInfo.getMetaPath());
    assertTrue(metafile.exists());
    // Truncate the meta file so it no longer covers every chunk.
    RandomAccessFile raf = new RandomAccessFile(metafile, "rw");
    try {
      raf.setLength(metafile.length() - 20);
    } finally {
      raf.close();
    }
    // Restart the datanode, then recover the lease: recovery must tolerate
    // the short meta file and eventually close the file.
    DataNodeProperties dnProp = cluster.stopDataNode(0);
    cluster.restartDataNode(dnProp, true);
    DistributedFileSystem newdfs = (DistributedFileSystem) FileSystem.newInstance(cluster.getConfiguration(0));
    try {
      count = 0;
      while (++count < 10 && !newdfs.recoverLease(file)) {
        Thread.sleep(1000);
      }
      assertTrue("File should be closed", newdfs.recoverLease(file));
    } finally {
      newdfs.close();
    }
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Stops the client from renewing its lease and shortens the hard lease
 * expiration period to 1s, so lease expiration is triggered while the
 * client is still alive. Verifies that lease recovery completes and that
 * the client fails if it continues to write to the file.
 * @throws Exception
 */
@Test public void testHardLeaseRecovery() throws Exception {
String filestr="/hardLeaseRecovery";
AppendTestUtil.LOG.info("filestr=" + filestr);
Path filepath=new Path(filestr);
FSDataOutputStream stm=dfs.create(filepath,true,BUF_SIZE,REPLICATION_NUM,BLOCK_SIZE);
assertTrue(dfs.dfs.exists(filestr));
// Write a random-sized prefix and hflush it so the data reaches datanodes.
int size=AppendTestUtil.nextInt(FILE_SIZE);
AppendTestUtil.LOG.info("size=" + size);
stm.write(buffer,0,size);
AppendTestUtil.LOG.info("hflush");
stm.hflush();
// Kill the client's lease renewer so the lease is never renewed again.
AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
dfs.dfs.getLeaseRenewer().interruptAndJoin();
// Shorten the hard limit so the NN recovers the lease quickly.
cluster.setLeasePeriod(LONG_LEASE_PERIOD,SHORT_LEASE_PERIOD);
// Poll until recovery finishes and the file leaves under-construction.
LocatedBlocks locatedBlocks;
do {
Thread.sleep(SHORT_LEASE_PERIOD);
locatedBlocks=dfs.dfs.getLocatedBlocks(filestr,0L,size);
}
while (locatedBlocks.isUnderConstruction());
assertEquals(size,locatedBlocks.getFileLength());
// The original writer's stream must now be dead: further writes fail.
try {
stm.write('b');
stm.close();
fail("Writer thread should have been killed");
}
catch ( IOException e) {
e.printStackTrace();
}
// Finally validate the recovered contents against what was hflushed.
AppendTestUtil.LOG.info("File size is good. Now validating sizes from datanodes...");
AppendTestUtil.checkFullFile(dfs,filepath,size,buffer,filestr);
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Stops the client from renewing its lease and shortens the soft lease
 * expiration period to 1s, so soft-lease expiry is triggered immediately by
 * a second client attempting to create the same file. Verifies that lease
 * recovery completes and that the recovered file has exactly the bytes
 * that were hflushed.
 * @throws Exception
 */
@Test public void testSoftLeaseRecovery() throws Exception {
  // Typed map (the original used raw Map/HashMap): fake user -> groups.
  Map<String, String[]> u2g_map = new HashMap<String, String[]>(1);
  u2g_map.put(fakeUsername, new String[]{fakeGroup});
  DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
  // Start with the normal lease periods while the file is being written.
  cluster.setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD, HdfsConstants.LEASE_HARDLIMIT_PERIOD);
  String filestr = "/foo" + AppendTestUtil.nextInt();
  AppendTestUtil.LOG.info("filestr=" + filestr);
  Path filepath = new Path(filestr);
  FSDataOutputStream stm = dfs.create(filepath, true, BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE);
  assertTrue(dfs.dfs.exists(filestr));
  // Write a random-sized prefix and hflush it to the datanodes.
  int size = AppendTestUtil.nextInt(FILE_SIZE);
  AppendTestUtil.LOG.info("size=" + size);
  stm.write(buffer, 0, size);
  AppendTestUtil.LOG.info("hflush");
  stm.hflush();
  // Kill the renewer, then shrink the soft limit so the lease expires.
  AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
  dfs.dfs.getLeaseRenewer().interruptAndJoin();
  cluster.setLeasePeriod(SHORT_LEASE_PERIOD, LONG_LEASE_PERIOD);
  {
    // A second user tries to create the same file: AlreadyBeingCreated is
    // expected until recovery finishes, then FileAlreadyExists.
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(fakeUsername, new String[]{fakeGroup});
    FileSystem dfs2 = DFSTestUtil.getFileSystemAs(ugi, conf);
    boolean done = false;
    for (int i = 0; i < 10 && !done; i++) {
      AppendTestUtil.LOG.info("i=" + i);
      try {
        dfs2.create(filepath, false, BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE);
        fail("Creation of an existing file should never succeed.");
      } catch (FileAlreadyExistsException ex) {
        done = true;
      } catch (AlreadyBeingCreatedException ex) {
        AppendTestUtil.LOG.info("GOOD! got " + ex.getMessage());
      } catch (IOException ioe) {
        AppendTestUtil.LOG.warn("UNEXPECTED IOException", ioe);
      }
      if (!done) {
        AppendTestUtil.LOG.info("sleep " + 5000 + "ms");
        try {
          Thread.sleep(5000);
        } catch (InterruptedException e) {
        }
      }
    }
    assertTrue(done);
  }
  AppendTestUtil.LOG.info("Lease for file " + filepath + " is recovered. " + "Validating its contents now...");
  // The recovered file must contain exactly the bytes that were hflushed.
  long fileSize = dfs.getFileStatus(filepath).getLen();
  assertTrue("File should be " + size + " bytes, but is actually " + " found to be " + fileSize + " bytes", fileSize == size);
  AppendTestUtil.LOG.info("File size is good. " + "Now validating data and sizes from datanodes...");
  AppendTestUtil.checkFullFile(dfs, filepath, size, buffer, filestr);
}
APIUtilityVerifier IdentityVerifier
/** LeaseRenewer instances must be shared per (authority, UGI) pair. */
@Test public void testInstanceSharing() throws IOException {
  // Same authority and same UGI yield the exact same renewer instance.
  LeaseRenewer first = LeaseRenewer.getInstance(FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
  Assert.assertSame(first, LeaseRenewer.getInstance(FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT));
  // A different UGI produces a distinct instance.
  LeaseRenewer otherUgi = LeaseRenewer.getInstance(FAKE_AUTHORITY, FAKE_UGI_B, MOCK_DFSCLIENT);
  Assert.assertNotSame(first, otherUgi);
  // A different authority is distinct from both of the above.
  LeaseRenewer otherAuthority = LeaseRenewer.getInstance("someOtherAuthority", FAKE_UGI_B, MOCK_DFSCLIENT);
  Assert.assertNotSame(first, otherAuthority);
  Assert.assertNotSame(otherUgi, otherAuthority);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test when input path is a file
 */
@Test public void testFile() throws IOException {
  fc.mkdir(TEST_DIR, FsPermission.getDefault(), true);
  writeFile(fc, FILE1, FILE_LEN);
  // listFiles on a plain file yields exactly that one file, whether or not
  // the listing is recursive; check both modes (recursive first, matching
  // the original order).
  boolean[] recursiveFlags = {true, false};
  for (boolean recursive : recursiveFlags) {
    RemoteIterator<LocatedFileStatus> itor = fc.util().listFiles(FILE1, recursive);
    LocatedFileStatus stat = itor.next();
    assertFalse(itor.hasNext());
    assertTrue(stat.isFile());
    assertEquals(FILE_LEN, stat.getLen());
    assertEquals(fc.makeQualified(FILE1), stat.getPath());
    assertEquals(1, stat.getBlockLocations().length);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests listFiles() when the input path is a directory: empty directory,
 * directory with one file, and a recursive listing over nested files.
 */
@Test public void testDirectory() throws IOException {
  fc.mkdir(DIR1, FsPermission.getDefault(), true);
  // An empty directory has no entries, recursively or not.
  RemoteIterator<LocatedFileStatus> itor = fc.util().listFiles(DIR1, true);
  assertFalse(itor.hasNext());
  itor = fc.util().listFiles(DIR1, false);
  assertFalse(itor.hasNext());
  // One file inside: both modes return exactly that file.
  writeFile(fc, FILE2, FILE_LEN);
  itor = fc.util().listFiles(DIR1, true);
  LocatedFileStatus stat = itor.next();
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN, stat.getLen());
  assertEquals(fc.makeQualified(FILE2), stat.getPath());
  assertEquals(1, stat.getBlockLocations().length);
  itor = fc.util().listFiles(DIR1, false);
  stat = itor.next();
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN, stat.getLen());
  assertEquals(fc.makeQualified(FILE2), stat.getPath());
  assertEquals(1, stat.getBlockLocations().length);
  // Recursive listing of the parent sees nested files too, in the order
  // asserted below; non-recursive listing sees only the direct child FILE1.
  writeFile(fc, FILE1, FILE_LEN);
  writeFile(fc, FILE3, FILE_LEN);
  itor = fc.util().listFiles(TEST_DIR, true);
  stat = itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE2), stat.getPath());
  stat = itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE3), stat.getPath());
  stat = itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE1), stat.getPath());
  assertFalse(itor.hasNext());
  itor = fc.util().listFiles(TEST_DIR, false);
  stat = itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE1), stat.getPath());
  assertFalse(itor.hasNext());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test when the input path has symbolic links as its children: a
 * recursive listing must follow both a directory symlink and a file
 * symlink, and a non-recursive listing resolves only the file symlink.
 */
@Test public void testSymbolicLinks() throws IOException {
  writeFile(fc, FILE1, FILE_LEN);
  writeFile(fc, FILE2, FILE_LEN);
  writeFile(fc, FILE3, FILE_LEN);
  Path dir4 = new Path(TEST_DIR, "dir4");
  Path dir5 = new Path(dir4, "dir5");
  Path file4 = new Path(dir4, "file4");
  // dir5 -> DIR1 (directory symlink), file4 -> FILE1 (file symlink).
  fc.createSymlink(DIR1, dir5, true);
  fc.createSymlink(FILE1, file4, true);
  // Recursive: resolves the dir symlink (FILE2, FILE3 inside DIR1) and the
  // file symlink (FILE1) in the order asserted below.
  RemoteIterator<LocatedFileStatus> itor = fc.util().listFiles(dir4, true);
  LocatedFileStatus stat = itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE2), stat.getPath());
  stat = itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE3), stat.getPath());
  stat = itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE1), stat.getPath());
  assertFalse(itor.hasNext());
  // Non-recursive: only the file symlink resolves to a file entry.
  itor = fc.util().listFiles(dir4, false);
  stat = itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE1), stat.getPath());
  assertFalse(itor.hasNext());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests get/set working directory in DFS: relative paths must resolve
 * against the current working directory, and the home directory must be
 * /user/&lt;current user&gt;.
 */
@Test(timeout=20000) public void testWorkingDirectory() throws IOException {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final FileSystem fileSys = cluster.getFileSystem();
  try {
    final Path initialDir = fileSys.getWorkingDirectory();
    assertTrue(initialDir.isAbsolute());
    // A relative path is resolved against the current working directory.
    final Path relFile = new Path("somewhat/random.txt");
    writeFile(fileSys, relFile);
    assertTrue(fileSys.exists(new Path(initialDir, relFile.toString())));
    fileSys.delete(relFile, true);
    // Switch to an absolute working directory and write the file there.
    final Path absoluteDir = new Path("/somewhere");
    fileSys.setWorkingDirectory(absoluteDir);
    writeFile(fileSys, relFile);
    cleanupFile(fileSys, new Path(absoluteDir, relFile.toString()));
    // A relative working directory is resolved against the previous one,
    // so files now land under /somewhere/else.
    final Path relativeDir = new Path("else");
    fileSys.setWorkingDirectory(relativeDir);
    writeFile(fileSys, relFile);
    readFile(fileSys, relFile);
    cleanupFile(fileSys, new Path(new Path(absoluteDir, relativeDir.toString()), relFile.toString()));
    // The home directory must be /user/<current user>.
    final Path expectedHome = fileSys.makeQualified(new Path("/user/" + getUserName(fileSys)));
    assertEquals(expectedHome, fileSys.getHomeDirectory());
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Bring up two clusters and assert that they are in different directories.
 * @throws Throwable on a failure
 */
@Test(timeout=100000) public void testDualClusters() throws Throwable {
  final File baseDir2 = new File(testDataPath, CLUSTER_2);
  final File baseDir3 = new File(testDataPath, CLUSTER_3);
  final Configuration conf = new HdfsConfiguration();
  final String basePath2 = baseDir2.getAbsolutePath();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, basePath2);
  final MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf).build();
  MiniDFSCluster cluster3 = null;
  try {
    final String dataDir2 = cluster2.getDataDirectory();
    assertEquals(new File(basePath2 + "/data"), new File(dataDir2));
    // Re-point the base directory and bring up the second cluster.
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir3.getAbsolutePath());
    cluster3 = new MiniDFSCluster.Builder(conf).build();
    final String dataDir3 = cluster3.getDataDirectory();
    assertTrue("Clusters are bound to the same directory: " + dataDir2,
        !dataDir2.equals(dataDir3));
  } finally {
    MiniDFSCluster.shutdownCluster(cluster3);
    MiniDFSCluster.shutdownCluster(cluster2);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify that without system properties the cluster still comes up, provided
 * the configuration is set
 * @throws Throwable on a failure
 */
@Test(timeout=100000) public void testClusterWithoutSystemProperties() throws Throwable {
  System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
  final Configuration conf = new HdfsConfiguration();
  // With the system property cleared, the base dir comes only from conf.
  final String basePath = new File(testDataPath, CLUSTER_1).getAbsolutePath();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, basePath);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    assertEquals(new File(basePath + "/data"), new File(cluster.getDataDirectory()));
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Corrupts the only replica of a block, verifies that the missing-blocks
 * counters (FileSystem API, BlockManager, and the NameNodeInfo MXBean) go
 * up, then deletes the file and verifies they drop back to zero.
 */
@Test public void testMissingBlocksAlert() throws IOException, InterruptedException, MalformedObjectNameException, AttributeNotFoundException, MBeanException, ReflectionException, InstanceNotFoundException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    // Run the replication monitor continuously and keep client retry
    // windows short so the test converges quickly.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 0);
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
    int fileLen = 10 * 1024;
    // Half-file block size => each file occupies two blocks.
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, fileLen / 2);
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    final BlockManager bm = cluster.getNamesystem().getBlockManager();
    DistributedFileSystem dfs = cluster.getFileSystem();
    DFSTestUtil.createFile(dfs, new Path("/testMissingBlocksAlert/file1"), fileLen, (short) 3, 0);
    // Corrupt the first block's replica of the second file on disk.
    Path corruptFile = new Path("/testMissingBlocks/corruptFile");
    DFSTestUtil.createFile(dfs, corruptFile, fileLen, (short) 3, 0);
    ExtendedBlock block = DFSTestUtil.getFirstBlock(dfs, corruptFile);
    assertTrue(TestDatanodeBlockScanner.corruptReplica(block, 0));
    // Read the file so the client detects and reports the corruption.
    FSDataInputStream in = dfs.open(corruptFile);
    try {
      in.readFully(new byte[fileLen]);
    }
    catch (ChecksumException ignored) {
      // Expected: the replica was deliberately corrupted above.
    }
    in.close();
    LOG.info("Waiting for missing blocks count to increase...");
    while (dfs.getMissingBlocksCount() <= 0) {
      Thread.sleep(100);
    }
    assertEquals(1, dfs.getMissingBlocksCount());
    assertEquals(4, dfs.getUnderReplicatedBlocksCount());
    assertEquals(3, bm.getUnderReplicatedNotMissingBlocks());
    // The same count must be visible through the NameNodeInfo MXBean.
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    Assert.assertEquals(1, (long) (Long) mbs.getAttribute(mxbeanName, "NumberOfMissingBlocks"));
    // Deleting the corrupt file must clear the missing-blocks alert.
    dfs.delete(corruptFile, true);
    LOG.info("Waiting for missing blocks count to be zero...");
    while (dfs.getMissingBlocksCount() > 0) {
      Thread.sleep(100);
    }
    assertEquals(2, dfs.getUnderReplicatedBlocksCount());
    assertEquals(2, bm.getUnderReplicatedNotMissingBlocks());
    Assert.assertEquals(0, (long) (Long) mbs.getAttribute(mxbeanName, "NumberOfMissingBlocks"));
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Writes a multi-block file without closing it, abandons the last block,
 * restarts the NameNode, and verifies that the file shrank by exactly one
 * block and that the surviving bytes are intact.
 */
@Test public void testRestartDfsWithAbandonedBlock() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // Drop idle IPC connections immediately so the restart cannot be masked
  // by connection caching.
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
  MiniDFSCluster cluster = null;
  long len = 0;
  FSDataOutputStream stream;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    FileSystem fs = cluster.getFileSystem();
    // Create the file and leave the stream open; hflush makes the data
    // visible to readers while the file is still under construction.
    stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
    stream.write(DATA_BEFORE_RESTART);
    stream.hflush();
    // Wait until the NN reports all but the last block as part of the file.
    while (len < BLOCK_SIZE * (NUM_BLOCKS - 1)) {
      FileStatus status = fs.getFileStatus(FILE_PATH);
      len = status.getLen();
      Thread.sleep(100);
    }
    DFSClient dfsclient = DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
    HdfsFileStatus fileStatus = dfsclient.getNamenode().getFileInfo(FILE_NAME);
    LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(FILE_NAME, 0, BLOCK_SIZE * NUM_BLOCKS);
    assertEquals(NUM_BLOCKS, blocks.getLocatedBlocks().size());
    // Abandon the last block, then restart the NN with it outstanding.
    LocatedBlock b = blocks.getLastLocatedBlock();
    dfsclient.getNamenode().abandonBlock(b.getBlock(), fileStatus.getFileId(), FILE_NAME, dfsclient.clientName);
    cluster.restartNameNode();
    // After restart the file must have shrunk by exactly one block.
    FileStatus status = fs.getFileStatus(FILE_PATH);
    assertEquals("Length incorrect: " + status.getLen(), len - BLOCK_SIZE, status.getLen());
    // Verify the surviving bytes match what was written before the restart.
    FSDataInputStream readStream = fs.open(FILE_PATH);
    try {
      byte[] verifyBuf = new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE];
      IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
      byte[] expectedBuf = new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE];
      System.arraycopy(DATA_BEFORE_RESTART, 0, expectedBuf, 0, expectedBuf.length);
      assertArrayEquals(expectedBuf, verifyBuf);
    }
    finally {
      IOUtils.closeStream(readStream);
    }
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creates and closes a file of certain length.
 * Calls append to allow the next write() operation to add to the end of it.
 * After the write() invocation, calls hflush() to make sure that data sunk
 * through the pipeline and checks the state of the last block's replica:
 * it is supposed to be in RBW state.
 * @throws IOException in case of an error
 */
@Test public void pipeline_01() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + METHOD_NAME);
  }
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  DFSTestUtil.createFile(fs, filePath, FILE_SIZE, REPL_FACTOR, rand.nextLong());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Invoking append but doing nothing otherwise...");
  }
  // Re-open for append and push data through the pipeline without closing.
  FSDataOutputStream ofs = fs.append(filePath);
  ofs.writeBytes("Some more stuff to write");
  ((DFSOutputStream) ofs.getWrappedStream()).hflush();
  // Typed list (the raw List would require a cast on get(0)); fetch the
  // last block of the file being appended to.
  List<LocatedBlock> lb = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks();
  String bpid = cluster.getNamesystem().getBlockPoolId();
  // While the stream is open, each datanode's replica must be RBW.
  for (DataNode dn : cluster.getDataNodes()) {
    Replica r = DataNodeTestUtils.fetchReplicaInfo(dn, bpid, lb.get(0).getBlock().getBlockId());
    assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
    assertEquals("Should be RBW replica on " + dn + " after sequence of calls append()/write()/hflush()", HdfsServerConstants.ReplicaState.RBW, r.getState());
  }
  ofs.close();
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Test replace datanode on failure: with ALWAYS-replace configured, kill a
 * datanode while writers have open pipelines and verify that replication
 * recovers and every file's contents are intact.
 */
@Test public void testReplaceDatanodeOnFailure() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // Always add a replacement datanode when one in a write pipeline fails.
  ReplaceDatanodeOnFailure.ALWAYS.write(conf);
  final String[] racks = new String[REPLICATION];
  Arrays.fill(racks, RACK0);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).racks(racks).numDataNodes(REPLICATION).build();
  try {
    final DistributedFileSystem fs = cluster.getFileSystem();
    final Path dir = new Path(DIR);
    // Start several slow writers so pipelines are open when a DN dies.
    final SlowWriter[] slowwriters = new SlowWriter[10];
    for (int i = 1; i <= slowwriters.length; i++) {
      slowwriters[i - 1] = new SlowWriter(fs, new Path(dir, "file" + i), i * 200L);
    }
    for (SlowWriter s : slowwriters) {
      s.start();
    }
    sleepSeconds(1);
    // Add two fresh datanodes, then kill one of the original ones.
    cluster.startDataNodes(conf, 2, true, null, new String[]{RACK1, RACK1});
    cluster.stopDataNode(AppendTestUtil.nextInt(REPLICATION));
    sleepSeconds(5);
    // Every writer must have recovered full replication despite the failure.
    for (SlowWriter s : slowwriters) {
      s.checkReplication();
      s.interruptRunning();
    }
    for (SlowWriter s : slowwriters) {
      s.joinAndClose();
    }
    LOG.info("Verify the file");
    // Each file holds the byte sequence 0,1,2,... -- verify byte-by-byte.
    for (int i = 0; i < slowwriters.length; i++) {
      LOG.info(slowwriters[i].filepath + ": length=" + fs.getFileStatus(slowwriters[i].filepath).getLen());
      FSDataInputStream in = null;
      try {
        in = fs.open(slowwriters[i].filepath);
        for (int j = 0, x; (x = in.read()) != -1; j++) {
          Assert.assertEquals(j, x);
        }
      }
      finally {
        IOUtils.closeStream(in);
      }
    }
  }
  finally {
    // cluster is a final local initialized before the try block, so it can
    // never be null here; the previous null check was redundant.
    cluster.shutdown();
  }
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Writes a fully replicated block, then (with the cluster down) deletes one
 * replica and corrupts two others on disk, restarts the cluster with a short
 * pending-replication timeout, and verifies the block gets re-replicated.
 */
@Test public void testPendingReplicationRetry() throws IOException {
  MiniDFSCluster cluster = null;
  int numDataNodes = 4;
  String testFile = "/replication-test-file";
  Path testPath = new Path(testFile);
  byte[] buffer = new byte[1024];
  Arrays.fill(buffer, (byte) '1');
  try {
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, Integer.toString(numDataNodes));
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    // Write one block and wait until it is replicated to all datanodes.
    OutputStream out = cluster.getFileSystem().create(testPath);
    out.write(buffer);
    out.close();
    waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
    ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(testFile, 0, Long.MAX_VALUE).get(0).getBlock();
    // Bring the cluster down so block files can be tampered with on disk.
    cluster.shutdown();
    cluster = null;
    // Garbage bytes used below to overwrite part of two replicas.
    Arrays.fill(buffer, 0, 25, (byte) '0');
    int fileCount = 0;
    // Delete the first replica found and corrupt the next two.
    for (int dnIndex = 0; dnIndex < 3; dnIndex++) {
      File blockFile = MiniDFSCluster.getBlockFile(dnIndex, block);
      LOG.info("Checking for file " + blockFile);
      if (blockFile != null && blockFile.exists()) {
        if (fileCount == 0) {
          LOG.info("Deleting file " + blockFile);
          assertTrue(blockFile.delete());
        }
        else {
          LOG.info("Corrupting file " + blockFile);
          long len = blockFile.length();
          assertTrue(len > 50);
          RandomAccessFile blockOut = new RandomAccessFile(blockFile, "rw");
          try {
            blockOut.seek(len / 3);
            blockOut.write(buffer, 0, 25);
          }
          finally {
            blockOut.close();
          }
        }
        fileCount++;
      }
    }
    assertEquals(3, fileCount);
    LOG.info("Restarting minicluster after deleting a replica and corrupting 2 crcs");
    // Restart with more datanodes and a short pending-replication timeout
    // so re-replication of the damaged block is retried quickly.
    conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, Integer.toString(numDataNodes));
    conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
    conf.set("dfs.datanode.block.write.timeout.sec", Integer.toString(5));
    conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.75f");
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes * 2).format(false).build();
    cluster.waitActive();
    dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Corrupts the single replica of a file, then raises the replication
 * factor; the resulting block transfer must detect the corruption, and the
 * block must end up marked corrupt with only one reported location.
 */
@Test public void testBadBlockReportOnTransfer() throws Exception {
  Configuration conf = new HdfsConfiguration();
  FileSystem fs = null;
  DFSClient dfsClient = null;
  LocatedBlocks blocks = null;
  int replicaCount = 0;
  short replFactor = 1;
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
  // Create a single-replica file and corrupt its only replica on disk.
  Path file1 = new Path("/tmp/testBadBlockReportOnTransfer/file1");
  DFSTestUtil.createFile(fs, file1, 1024, replFactor, 0);
  DFSTestUtil.waitReplication(fs, file1, replFactor);
  ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
  int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
  assertEquals("Corrupted too few blocks", replFactor, blockFilesCorrupted);
  // Raising replication forces a block transfer, which should detect the
  // corruption and report the bad block to the NameNode.
  replFactor = 2;
  fs.setReplication(file1, replFactor);
  blocks = dfsClient.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
  while (!blocks.get(0).isCorrupt()) {
    try {
      LOG.info("Waiting until block is marked as corrupt...");
      Thread.sleep(1000);
    }
    catch (InterruptedException ie) {
      // Ignore and keep polling.
    }
    blocks = dfsClient.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
  }
  // Only the single (corrupt) replica should be reported as a location.
  replicaCount = blocks.get(0).getLocations().length;
  assertEquals(1, replicaCount);
  cluster.shutdown();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Rolling upgrade across two NameNode image directories that share one
// MiniJournalCluster (QJM): prepare on NN1, take over on NN2, verify the
// upgrade survives restarts, reject "-upgrade" mid-rolling-upgrade, then
// finalize and restart normally.
@Test(timeout=30000) public void testRollingUpgradeWithQJM() throws Exception {
String nnDirPrefix=MiniDFSCluster.getBaseDirectory() + "/nn/";
final File nn1Dir=new File(nnDirPrefix + "image1");
final File nn2Dir=new File(nnDirPrefix + "image2");
LOG.info("nn1Dir=" + nn1Dir);
LOG.info("nn2Dir=" + nn2Dir);
final Configuration conf=new HdfsConfiguration();
// NOTE(review): mjc is never shut down in this method -- confirm whether
// the journal cluster is cleaned up elsewhere or leaks.
final MiniJournalCluster mjc=new MiniJournalCluster.Builder(conf).build();
setConf(conf,nn1Dir,mjc);
{
// Format nn1Dir by starting and immediately stopping a cluster on it.
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).manageNameDfsDirs(false).checkExitOnShutdown(false).build();
cluster.shutdown();
}
MiniDFSCluster cluster2=null;
try {
// Copy NN1's freshly formatted image into NN2's directory so both start
// from the same checkpoint.
FileUtil.fullyDelete(nn2Dir);
FileUtil.copy(nn1Dir,FileSystem.getLocal(conf).getRaw(),new Path(nn2Dir.getAbsolutePath()),false,conf);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageNameDfsDirs(false).checkExitOnShutdown(false).build();
final Path foo=new Path("/foo");
final Path bar=new Path("/bar");
final Path baz=new Path("/baz");
final RollingUpgradeInfo info1;
{
// On cluster 1: create /foo, PREPARE the rolling upgrade while in safe
// mode, then create /bar after the upgrade has started.
final DistributedFileSystem dfs=cluster.getFileSystem();
dfs.mkdirs(foo);
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
info1=dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
LOG.info("START\n" + info1);
Assert.assertEquals(info1,dfs.rollingUpgrade(RollingUpgradeAction.QUERY));
dfs.mkdirs(bar);
cluster.shutdown();
}
// Start cluster 2 from the copied image; it must see /foo and /bar (edits
// arrive via the shared journal) and report the same upgrade info.
final Configuration conf2=setConf(new Configuration(),nn2Dir,mjc);
cluster2=new MiniDFSCluster.Builder(conf2).numDataNodes(0).format(false).manageNameDfsDirs(false).build();
final DistributedFileSystem dfs2=cluster2.getFileSystem();
Assert.assertTrue(dfs2.exists(foo));
Assert.assertTrue(dfs2.exists(bar));
Assert.assertFalse(dfs2.exists(baz));
Assert.assertEquals(info1,dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
dfs2.mkdirs(baz);
// A plain restart must preserve both the upgrade state and all data.
LOG.info("RESTART cluster 2");
cluster2.restartNameNode();
Assert.assertEquals(info1,dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
Assert.assertTrue(dfs2.exists(foo));
Assert.assertTrue(dfs2.exists(bar));
Assert.assertTrue(dfs2.exists(baz));
// Restarting with "-upgrade" during a rolling upgrade is expected to fail.
try {
cluster2.restartNameNode("-upgrade");
}
catch ( IOException e) {
LOG.info("The exception is expected.",e);
}
LOG.info("RESTART cluster 2 again");
cluster2.restartNameNode();
Assert.assertEquals(info1,dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
Assert.assertTrue(dfs2.exists(foo));
Assert.assertTrue(dfs2.exists(bar));
Assert.assertTrue(dfs2.exists(baz));
// FINALIZE must carry the original start time forward.
final RollingUpgradeInfo finalize=dfs2.rollingUpgrade(RollingUpgradeAction.FINALIZE);
LOG.info("FINALIZE: " + finalize);
Assert.assertEquals(info1.getStartTime(),finalize.getStartTime());
// A regular restart after finalization must still see all directories.
LOG.info("RESTART cluster 2 with regular startup option");
cluster2.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster2.restartNameNode();
Assert.assertTrue(dfs2.exists(foo));
Assert.assertTrue(dfs2.exists(bar));
Assert.assertTrue(dfs2.exists(baz));
}
finally {
if (cluster2 != null) cluster2.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Prepares a rolling upgrade, makes a change afterwards, and verifies that
 * restarting the NameNode with "-rollingUpgrade rollback" keeps pre-upgrade
 * state (/foo) while discarding the post-prepare change (/bar).
 */
@Test public void testRollbackCommand() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final Path foo = new Path("/foo");
  final Path bar = new Path("/bar");
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final DFSAdmin dfsadmin = new DFSAdmin(conf);
    dfs.mkdirs(foo);
    // Prepare the rolling upgrade while in safe mode, then create /bar,
    // which a later rollback should discard.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    Assert.assertEquals(0, dfsadmin.run(new String[]{"-rollingUpgrade", "prepare"}));
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    dfs.mkdirs(bar);
    checkNNStorage(cluster.getNamesystem().getFSImage().getStorage(), 3, -1);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
  // Restart the NameNode with the rollback option: /foo must survive,
  // /bar must be gone.
  NameNode nn = null;
  try {
    nn = NameNode.createNameNode(new String[]{"-rollingUpgrade", "rollback"}, conf);
    Assert.assertNotNull(nn.getNamesystem().getFSDirectory().getINode4Write(foo.toString()));
    Assert.assertNull(nn.getNamesystem().getFSDirectory().getINode4Write(bar.toString()));
    checkNNStorage(nn.getNamesystem().getFSImage().getStorage(), 3, 7);
  } finally {
    if (nn != null) {
      nn.stop();
      nn.join();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that the NN initializes its under-replicated blocks queue
 * before it is ready to exit safemode (HDFS-1476)
 */
@Test(timeout=45000) public void testInitializeReplQueuesEarly() throws Exception {
  LOG.info("Starting testInitializeReplQueuesEarly");
  // Spread blocks across datanodes instead of preferring the local node.
  BlockManagerTestUtil.setWritingPrefersLocalNode(cluster.getNamesystem().getBlockManager(), false);
  cluster.startDataNodes(conf, 2, true, StartupOption.REGULAR, null);
  cluster.waitActive();
  LOG.info("Creating files");
  DFSTestUtil.createFile(fs, TEST_PATH, 15 * BLOCK_SIZE, (short) 1, 1L);
  LOG.info("Stopping all DataNodes");
  // Typed list (the raw List would require casts when restarting DNs).
  List<MiniDFSCluster.DataNodeProperties> dnprops = Lists.newLinkedList();
  dnprops.add(cluster.stopDataNode(0));
  dnprops.add(cluster.stopDataNode(0));
  dnprops.add(cluster.stopDataNode(0));
  // Lower the repl-queue threshold so a single block report crosses it.
  cluster.getConfiguration(0).setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY, 1f / 15f);
  LOG.info("Restarting NameNode");
  cluster.restartNameNode();
  final NameNode nn = cluster.getNameNode();
  String status = nn.getNamesystem().getSafemode();
  assertEquals("Safe mode is ON. The reported blocks 0 needs additional " + "15 blocks to reach the threshold 0.9990 of total blocks 15.\n" + "The number of live datanodes 0 has reached the minimum number 0. " + "Safe mode will be turned off automatically once the thresholds " + "have been reached.", status);
  assertFalse("Mis-replicated block queues should not be initialized " + "until threshold is crossed", NameNodeAdapter.safeModeInitializedReplQueues(nn));
  LOG.info("Restarting one DataNode");
  cluster.restartDataNode(dnprops.remove(0));
  // Wait until the NN has processed the restarted DN's block report(s),
  // one per storage.
  GenericTestUtils.waitFor(new Supplier<Boolean>(){
    @Override public Boolean get() {
      return getLongCounter("StorageBlockReportOps", getMetrics(NN_METRICS)) == cluster.getStoragesPerDatanode();
    }
  }, 10, 10000);
  // The single report must make some, but not all, of the 15 blocks safe,
  // and must have triggered repl-queue initialization while in safe mode.
  final int safe = NameNodeAdapter.getSafeModeSafeBlocks(nn);
  assertTrue("Expected first block report to make some blocks safe.", safe > 0);
  assertTrue("Did not expect first block report to make all blocks safe.", safe < 15);
  assertTrue(NameNodeAdapter.safeModeInitializedReplQueues(nn));
  // The remaining blocks should show up as under-replicated.
  BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
  long underReplicatedBlocks = nn.getNamesystem().getUnderReplicatedBlocks();
  while (underReplicatedBlocks != (15 - safe)) {
    LOG.info("UnderReplicatedBlocks expected=" + (15 - safe) + ", actual=" + underReplicatedBlocks);
    Thread.sleep(100);
    BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
    underReplicatedBlocks = nn.getNamesystem().getUnderReplicatedBlocks();
  }
  cluster.restartDataNodes();
}
APIUtilityVerifier EqualityVerifier
/**
 * Junit Test reading while writing.
 */
@Test public void testWriteReadSeq() throws IOException {
  // Sequential (non-positional) reads through the FileSystem API.
  useFCOption = false;
  positionReadOption = false;
  final long readStartPos = 0;
  final int stat = testWriteAndRead(filenameOption, WR_NTIMES, WR_CHUNK_SIZE, readStartPos);
  LOG.info("Summary status from test1: status= " + stat);
  Assert.assertEquals(0, stat);
}
APIUtilityVerifier EqualityVerifier
/**
 * Junit Test position read while writing.
 */
@Test public void testWriteReadPos() throws IOException {
  // Positional (pread) reads while the file is still being written.
  positionReadOption = true;
  final long readStartPos = 0;
  final int stat = testWriteAndRead(filenameOption, WR_NTIMES, WR_CHUNK_SIZE, readStartPos);
  Assert.assertEquals(0, stat);
}
APIUtilityVerifier EqualityVerifier
/**
 * Junit Test position read of the current block being written.
 */
@Test public void testReadPosCurrentBlock() throws IOException {
  positionReadOption = true;
  // Write one and a half blocks per chunk and start reading just inside
  // the second (still-being-written) block.
  final int chunkSize = (int) blockSize + (int) (blockSize / 2);
  final long readStartPos = blockSize + 1;
  final int writeRounds = 5;
  final int stat = testWriteAndRead(filenameOption, writeRounds, chunkSize, readStartPos);
  Assert.assertEquals(0, stat);
}
APIUtilityVerifier InternalCallVerifier ConditionMatcher
@Test public void testGetUserGroupInformationSecure() throws IOException {
  final String userName = "user1";
  final String currentUser = "test-user";
  // Simulate a Kerberos-authenticated login user.
  final UserGroupInformation currentUserUgi = UserGroupInformation.createRemoteUser(currentUser);
  currentUserUgi.setAuthenticationMethod(KERBEROS);
  UserGroupInformation.setLoginUser(currentUserUgi);
  final NfsConfiguration conf = new NfsConfiguration();
  final DFSClientCache cache = new DFSClientCache(conf);
  final UserGroupInformation ugiResult = cache.getUserGroupInformation(userName, currentUserUgi);
  // The cache must hand back a PROXY UGI for userName on top of the
  // authenticated login user.
  assertThat(ugiResult.getUserName(), is(userName));
  assertThat(ugiResult.getRealUser(), is(currentUserUgi));
  assertThat(ugiResult.getAuthenticationMethod(), is(UserGroupInformation.AuthenticationMethod.PROXY));
}
APIUtilityVerifier InternalCallVerifier ConditionMatcher
@Test public void testGetUserGroupInformation() throws IOException {
  final String userName = "user1";
  final String currentUser = "currentUser";
  final UserGroupInformation currentUserUgi = UserGroupInformation.createUserForTesting(currentUser, new String[0]);
  final NfsConfiguration conf = new NfsConfiguration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost");
  final DFSClientCache cache = new DFSClientCache(conf);
  final UserGroupInformation ugiResult = cache.getUserGroupInformation(userName, currentUserUgi);
  // The cache must hand back a PROXY UGI for userName whose real user is
  // the caller's UGI.
  assertThat(ugiResult.getUserName(), is(userName));
  assertThat(ugiResult.getRealUser(), is(currentUserUgi));
  assertThat(ugiResult.getAuthenticationMethod(), is(UserGroupInformation.AuthenticationMethod.PROXY));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Exercises the NFS READDIRPLUS operation: full listing, resuming from a
 * cookie, and re-issuing a request after the cookie's entry was deleted.
 */
@Test public void testReaddirPlus() throws IOException {
  // READDIRPLUS over the whole test directory, starting at cookie 0.
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  handle.serialize(xdr_req);
  xdr_req.writeLongAsHyper(0);
  xdr_req.writeLongAsHyper(0);
  xdr_req.writeInt(100);
  xdr_req.writeInt(1000);
  READDIRPLUS3Response responsePlus = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
  List<EntryPlus3> direntPlus = responsePlus.getDirListPlus().getEntries();
  assertEquals(5, direntPlus.size());
  // Resume the listing from the cookie of entry "f2": only "f3" remains.
  status = nn.getRpcServer().getFileInfo(testdir + "/f2");
  long f2Id = status.getFileId();
  xdr_req = new XDR();
  handle = new FileHandle(dirId);
  handle.serialize(xdr_req);
  xdr_req.writeLongAsHyper(f2Id);
  xdr_req.writeLongAsHyper(0);
  xdr_req.writeInt(100);
  xdr_req.writeInt(1000);
  responsePlus = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
  direntPlus = responsePlus.getDirListPlus().getEntries();
  assertEquals(1, direntPlus.size());
  EntryPlus3 entryPlus = direntPlus.get(0);
  assertEquals("f3", entryPlus.getName());
  // Delete f2 and re-issue the same request (now a stale cookie): the
  // server returns 2 entries.
  hdfs.delete(new Path(testdir + "/f2"), false);
  responsePlus = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
  direntPlus = responsePlus.getDirListPlus().getEntries();
  assertEquals(2, direntPlus.size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Exercises the NFS READDIR operation: full listing, resuming from a
 * cookie, and re-issuing a request after the cookie's entry was deleted.
 */
@Test public void testReaddirBasic() throws IOException {
  // READDIR over the whole test directory, starting at cookie 0.
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  handle.serialize(xdr_req);
  xdr_req.writeLongAsHyper(0);
  xdr_req.writeLongAsHyper(0);
  xdr_req.writeInt(100);
  READDIR3Response response = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
  List<Entry3> dirents = response.getDirList().getEntries();
  assertEquals(5, dirents.size());
  // Resume the listing from the cookie of entry "f2": only "f3" remains.
  status = nn.getRpcServer().getFileInfo(testdir + "/f2");
  long f2Id = status.getFileId();
  xdr_req = new XDR();
  handle = new FileHandle(dirId);
  handle.serialize(xdr_req);
  xdr_req.writeLongAsHyper(f2Id);
  xdr_req.writeLongAsHyper(0);
  xdr_req.writeInt(100);
  response = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
  dirents = response.getDirList().getEntries();
  assertEquals(1, dirents.size());
  Entry3 entry = dirents.get(0);
  assertEquals("f3", entry.getName());
  // Delete f2 and re-issue the same request (now a stale cookie): the
  // server returns 2 entries.
  hdfs.delete(new Path(testdir + "/f2"), false);
  response = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
  dirents = response.getDirList().getEntries();
  assertEquals(2, dirents.size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Drives OpenFileCtx.checkCommit() through its states: inactive context
 * (with and without pending writes), commit offsets at/below the flushed
 * position (DO_SYNC/FINISHED), beyond it (WAIT, queued), and commit-all.
 */
@Test public void testCheckCommit() throws IOException {
  DFSClient dfsClient = Mockito.mock(DFSClient.class);
  Nfs3FileAttributes attr = new Nfs3FileAttributes();
  HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
  Mockito.when(fos.getPos()).thenReturn((long) 0);
  OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new IdUserGroup(new NfsConfiguration()));
  COMMIT_STATUS ret;
  // An inactive context with no pending writes reports INACTIVE_CTX.
  ctx.setActiveStatusForTest(false);
  Channel ch = Mockito.mock(Channel.class);
  ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
  Assert.assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_CTX, ret);
  // With a pending write it reports INACTIVE_WITH_PENDING_WRITE instead.
  ctx.getPendingWritesForTest().put(new OffsetRange(5, 10), new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
  ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
  Assert.assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE, ret);
  ctx.setActiveStatusForTest(true);
  // Data up to offset 10 is now flushed (mocked stream position).
  Mockito.when(fos.getPos()).thenReturn((long) 10);
  COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false);
  Assert.assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC, status);
  ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, false);
  Assert.assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
  status = ctx.checkCommitInternal(10, ch, 1, attr, false);
  Assert.assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC, status);
  ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, false);
  Assert.assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
  // Typed map (the raw ConcurrentNavigableMap would require a cast to
  // unbox firstKey() into a long below).
  ConcurrentNavigableMap<Long, ?> commits = ctx.getPendingCommitsForTest();
  Assert.assertEquals(0, commits.size());
  // A commit offset beyond the flushed position must wait and be queued
  // under that offset.
  ret = ctx.checkCommit(dfsClient, 11, ch, 1, attr, false);
  Assert.assertEquals(COMMIT_STATUS.COMMIT_WAIT, ret);
  Assert.assertEquals(1, commits.size());
  long key = commits.firstKey();
  Assert.assertEquals(11, key);
  commits.remove(Long.valueOf(11));
  // Commit offset 0 means "commit everything"; with the pending write for
  // range (5,10) still present, a commit is queued under key 9.
  ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
  Assert.assertEquals(COMMIT_STATUS.COMMIT_WAIT, ret);
  Assert.assertEquals(1, commits.size());
  key = commits.firstKey();
  Assert.assertEquals(9, key);
  // Once the pending write is removed, commit-all finishes immediately.
  ctx.getPendingWritesForTest().remove(new OffsetRange(5, 10));
  ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
  Assert.assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier
// Verifies that OpenFileCtx.alterWriteRequest(request, n) trims the first n
// bytes off a write request (the part already on disk) and that the trimmed
// buffer exposes exactly the remaining suffix via its position/limit window.
@Test public void testAlterWriteRequest() throws IOException {
int len=20;
byte[] data=new byte[len];
ByteBuffer buffer=ByteBuffer.wrap(data);
// Fill the buffer with bytes 0..19 so each byte's value equals its offset,
// making the post-trim content easy to assert on.
for (int i=0; i < len; i++) {
buffer.put((byte)i);
}
buffer.flip();
int originalCount=buffer.array().length;
WRITE3Request request=new WRITE3Request(new FileHandle(),0,data.length,WriteStableHow.UNSTABLE,buffer);
// Baseline: without trimming, the WriteCtx data covers the full backing array.
WriteCtx writeCtx1=new WriteCtx(request.getHandle(),request.getOffset(),request.getCount(),WriteCtx.INVALID_ORIGINAL_COUNT,request.getStableHow(),request.getData(),null,1,false,WriteCtx.DataState.NO_DUMP);
Assert.assertTrue(writeCtx1.getData().array().length == originalCount);
// Case 1: trim the first 12 bytes; 8 bytes (values 12..19) must remain.
OpenFileCtx.alterWriteRequest(request,12);
WriteCtx writeCtx2=new WriteCtx(request.getHandle(),request.getOffset(),request.getCount(),originalCount,request.getStableHow(),request.getData(),null,2,false,WriteCtx.DataState.NO_DUMP);
ByteBuffer appendedData=writeCtx2.getData();
int position=appendedData.position();
int limit=appendedData.limit();
Assert.assertTrue(position == 12);
Assert.assertTrue(limit - position == 8);
Assert.assertTrue(appendedData.get(position) == (byte)12);
Assert.assertTrue(appendedData.get(position + 1) == (byte)13);
Assert.assertTrue(appendedData.get(position + 2) == (byte)14);
Assert.assertTrue(appendedData.get(position + 7) == (byte)19);
// Case 2: rewind and trim only 1 byte; 19 bytes (values 1..19) must remain.
buffer.position(0);
request=new WRITE3Request(new FileHandle(),0,data.length,WriteStableHow.UNSTABLE,buffer);
OpenFileCtx.alterWriteRequest(request,1);
WriteCtx writeCtx3=new WriteCtx(request.getHandle(),request.getOffset(),request.getCount(),originalCount,request.getStableHow(),request.getData(),null,2,false,WriteCtx.DataState.NO_DUMP);
appendedData=writeCtx3.getData();
position=appendedData.position();
limit=appendedData.limit();
Assert.assertTrue(position == 1);
Assert.assertTrue(limit - position == 19);
Assert.assertTrue(appendedData.get(position) == (byte)1);
Assert.assertTrue(appendedData.get(position + 18) == (byte)19);
// Case 3: trim all but the last byte; exactly one byte (value 19) remains.
buffer.position(0);
request=new WRITE3Request(new FileHandle(),0,data.length,WriteStableHow.UNSTABLE,buffer);
OpenFileCtx.alterWriteRequest(request,19);
WriteCtx writeCtx4=new WriteCtx(request.getHandle(),request.getOffset(),request.getCount(),originalCount,request.getStableHow(),request.getData(),null,2,false,WriteCtx.DataState.NO_DUMP);
appendedData=writeCtx4.getData();
position=appendedData.position();
limit=appendedData.limit();
Assert.assertTrue(position == 19);
Assert.assertTrue(limit - position == 1);
Assert.assertTrue(appendedData.get(position) == (byte)19);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Exercises the NFS gateway write path with both DATA_SYNC and FILE_SYNC
// stable-how modes: creates a file over NFS, writes 10 bytes, waits for the
// write to settle, and reads the bytes back to verify them.
@Test public void testWriteStableHow() throws IOException, InterruptedException {
NfsConfiguration config=new NfsConfiguration();
DFSClient client=null;
MiniDFSCluster cluster=null;
RpcProgramNfs3 nfsd;
// Mock the security handler so the NFS server sees the current OS user.
SecurityHandler securityHandler=Mockito.mock(SecurityHandler.class);
Mockito.when(securityHandler.getUser()).thenReturn(System.getProperty("user.name"));
String currentUser=System.getProperty("user.name");
// Allow the current user to proxy for any group/host, required for the
// NFS gateway to impersonate callers.
config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(currentUser),"*");
config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(currentUser),"*");
ProxyUsers.refreshSuperUserGroupsConfiguration(config);
try {
cluster=new MiniDFSCluster.Builder(config).numDataNodes(1).build();
cluster.waitActive();
client=new DFSClient(NameNode.getAddress(config),config);
// Port 0 lets the mountd/NFS services pick free ephemeral ports.
config.setInt("nfs3.mountd.port",0);
config.setInt("nfs3.server.port",0);
Nfs3 nfs3=new Nfs3(config);
nfs3.startServiceInternal(false);
nfsd=(RpcProgramNfs3)nfs3.getRpcProgram();
HdfsFileStatus status=client.getFileInfo("/");
FileHandle rootHandle=new FileHandle(status.getFileId());
// Case 1: create "file1" and write with DATA_SYNC.
CREATE3Request createReq=new CREATE3Request(rootHandle,"file1",Nfs3Constant.CREATE_UNCHECKED,new SetAttr3(),0);
XDR createXdr=new XDR();
createReq.serialize(createXdr);
CREATE3Response createRsp=nfsd.create(createXdr.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
FileHandle handle=createRsp.getObjHandle();
byte[] buffer=new byte[10];
for (int i=0; i < 10; i++) {
buffer[i]=(byte)i;
}
WRITE3Request writeReq=new WRITE3Request(handle,0,10,WriteStableHow.DATA_SYNC,ByteBuffer.wrap(buffer));
XDR writeXdr=new XDR();
writeReq.serialize(writeXdr);
nfsd.write(writeXdr.asReadOnlyWrap(),null,1,securityHandler,new InetSocketAddress("localhost",1234));
// Block until the async write completes (up to 60s).
waitWrite(nfsd,handle,60000);
READ3Request readReq=new READ3Request(handle,0,10);
XDR readXdr=new XDR();
readReq.serialize(readXdr);
READ3Response readRsp=nfsd.read(readXdr.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
assertTrue(Arrays.equals(buffer,readRsp.getData().array()));
// Case 2: create "file2" and write the same bytes with FILE_SYNC.
CREATE3Request createReq2=new CREATE3Request(rootHandle,"file2",Nfs3Constant.CREATE_UNCHECKED,new SetAttr3(),0);
XDR createXdr2=new XDR();
createReq2.serialize(createXdr2);
CREATE3Response createRsp2=nfsd.create(createXdr2.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
FileHandle handle2=createRsp2.getObjHandle();
WRITE3Request writeReq2=new WRITE3Request(handle2,0,10,WriteStableHow.FILE_SYNC,ByteBuffer.wrap(buffer));
XDR writeXdr2=new XDR();
writeReq2.serialize(writeXdr2);
nfsd.write(writeXdr2.asReadOnlyWrap(),null,1,securityHandler,new InetSocketAddress("localhost",1234));
waitWrite(nfsd,handle2,60000);
READ3Request readReq2=new READ3Request(handle2,0,10);
XDR readXdr2=new XDR();
readReq2.serialize(readXdr2);
READ3Response readRsp2=nfsd.read(readXdr2.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
assertTrue(Arrays.equals(buffer,readRsp2.getData().array()));
// FILE_SYNC implies the length is visible through the regular HDFS client.
status=client.getFileInfo("/file2");
assertTrue(status.getLen() == 10);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testConvertCheckpointSignature(){
// Convert a CheckpointSignature to its protobuf form and back, then check
// that every field survives the round trip.
CheckpointSignature original=new CheckpointSignature(getStorageInfo(NodeType.NAME_NODE),"bpid",100,1);
CheckpointSignatureProto proto=PBHelper.convert(original);
CheckpointSignature restored=PBHelper.convert(proto);
assertEquals(original.getBlockpoolID(),restored.getBlockpoolID());
assertEquals(original.getClusterID(),restored.getClusterID());
assertEquals(original.getCTime(),restored.getCTime());
assertEquals(original.getCurSegmentTxId(),restored.getCurSegmentTxId());
assertEquals(original.getLayoutVersion(),restored.getLayoutVersion());
assertEquals(original.getMostRecentCheckpointTxId(),restored.getMostRecentCheckpointTxId());
assertEquals(original.getNamespaceID(),restored.getNamespaceID());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Round-trips a RecoveringBlock through its protobuf form and verifies the
// block and every target datanode location are preserved.
@Test public void testConvertRecoveringBlock(){
DatanodeInfo di1=DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo di2=DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo[] dnInfo=new DatanodeInfo[]{di1,di2};
RecoveringBlock b=new RecoveringBlock(getExtendedBlock(),dnInfo,3);
RecoveringBlockProto bProto=PBHelper.convert(b);
RecoveringBlock b1=PBHelper.convert(bProto);
assertEquals(b.getBlock(),b1.getBlock());
DatanodeInfo[] dnInfo1=b1.getLocations();
assertEquals(dnInfo.length,dnInfo1.length);
for (int i=0; i < dnInfo.length; i++) {
// Fix: compare element i on each iteration. The loop previously compared
// dnInfo[0] with dnInfo1[0] every time, so locations past index 0 were
// never actually checked.
compare(dnInfo[i],dnInfo1[i]);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testConvertRemoteEditLogManifest(){
// Build a manifest of two edit-log ranges, round-trip it through protobuf,
// and verify each log entry is preserved in order.
List logs=new ArrayList();
logs.add(new RemoteEditLog(1,10));
logs.add(new RemoteEditLog(11,20));
RemoteEditLogManifest manifest=new RemoteEditLogManifest(logs);
RemoteEditLogManifestProto proto=PBHelper.convert(manifest);
RemoteEditLogManifest restored=PBHelper.convert(proto);
List restoredLogs=restored.getLogs();
assertEquals(logs.size(),restoredLogs.size());
for (int idx=0; idx < logs.size(); idx++) {
compare(logs.get(idx),restoredLogs.get(idx));
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testConvertNamenodeRegistration(){
// Round-trip a NamenodeRegistration through protobuf and verify every
// externally visible field is preserved.
StorageInfo storage=getStorageInfo(NodeType.NAME_NODE);
NamenodeRegistration before=new NamenodeRegistration("address:999","http:1000",storage,NamenodeRole.NAMENODE);
NamenodeRegistrationProto proto=PBHelper.convert(before);
NamenodeRegistration after=PBHelper.convert(proto);
assertEquals(before.getAddress(),after.getAddress());
assertEquals(before.getClusterID(),after.getClusterID());
assertEquals(before.getCTime(),after.getCTime());
assertEquals(before.getHttpAddress(),after.getHttpAddress());
assertEquals(before.getLayoutVersion(),after.getLayoutVersion());
assertEquals(before.getNamespaceID(),after.getNamespaceID());
assertEquals(before.getRegistrationID(),after.getRegistrationID());
assertEquals(before.getRole(),after.getRole());
assertEquals(before.getVersion(),after.getVersion());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testConvertExtendedBlock(){
// Protobuf round trip of an ExtendedBlock must yield an equal block, both
// for the default id and for a negative block id.
ExtendedBlock original=getExtendedBlock();
ExtendedBlockProto proto=PBHelper.convert(original);
ExtendedBlock restored=PBHelper.convert(proto);
assertEquals(original,restored);
// Repeat with a negative block id to cover signed-id encoding.
original.setBlockId(-1);
proto=PBHelper.convert(original);
restored=PBHelper.convert(proto);
assertEquals(original,restored);
}
APIUtilityVerifier EqualityVerifier
// Round-trips a list of LocatedBlocks through protobuf and compares each
// element pairwise.
@Test public void testConvertLocatedBlockList(){
ArrayList lbl=new ArrayList();
for (int i=0; i < 3; i++) {
lbl.add(createLocatedBlock());
}
List lbpl=PBHelper.convertLocatedBlock2(lbl);
List lbl2=PBHelper.convertLocatedBlock(lbpl);
assertEquals(lbl.size(),lbl2.size());
for (int i=0; i < lbl.size(); i++) {
// Fix: compare element i of the converted list. The loop previously read
// lbl2.get(2) on every iteration, so elements 0 and 1 of the round-tripped
// list were never checked against their originals.
compare(lbl.get(i),lbl2.get(i));
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testConvertStoragInfo(){
// Convert a namenode StorageInfo to protobuf and back; the four identity
// fields must survive the round trip.
StorageInfo before=getStorageInfo(NodeType.NAME_NODE);
StorageInfoProto proto=PBHelper.convert(before);
StorageInfo after=PBHelper.convert(proto,NodeType.NAME_NODE);
assertEquals(before.getClusterID(),after.getClusterID());
assertEquals(before.getCTime(),after.getCTime());
assertEquals(before.getLayoutVersion(),after.getLayoutVersion());
assertEquals(before.getNamespaceID(),after.getNamespaceID());
}
APIUtilityVerifier EqualityVerifier
@Test public void testAclStatusProto(){
// An AclStatus containing a single entry must equal itself after a full
// protobuf round trip.
AclEntry entry=new AclEntry.Builder().setName("test").setPermission(FsAction.READ_EXECUTE).setScope(AclEntryScope.DEFAULT).setType(AclEntryType.OTHER).build();
AclStatus status=new AclStatus.Builder().owner("foo").group("bar").addEntry(entry).build();
Assert.assertEquals(status,PBHelper.convert(PBHelper.convert(status)));
}
APIUtilityVerifier EqualityVerifier
@Test public void testConvertLocatedBlockArray(){
// Round-trip an array of located blocks through protobuf and compare each
// element pairwise.
LocatedBlock[] source=new LocatedBlock[3];
for (int idx=0; idx < source.length; idx++) {
source[idx]=createLocatedBlock();
}
LocatedBlockProto[] protos=PBHelper.convertLocatedBlock(source);
LocatedBlock[] restored=PBHelper.convertLocatedBlock(protos);
assertEquals(source.length,restored.length);
for (int idx=0; idx < source.length; idx++) {
compare(source[idx],restored[idx]);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testConvertNamespaceInfo(){
// NamespaceInfo protobuf round trip: the shared StorageInfo fields are
// checked via compare(), plus the namespace-specific block pool id and
// build version.
NamespaceInfo before=new NamespaceInfo(37,"clusterID","bpID",2300);
NamespaceInfoProto proto=PBHelper.convert(before);
NamespaceInfo after=PBHelper.convert(proto);
compare(before,after);
assertEquals(before.getBlockPoolID(),after.getBlockPoolID());
assertEquals(before.getBuildVersion(),after.getBuildVersion());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testConvertDatanodeRegistration(){
// Round-trip a DatanodeRegistration (including its exported block keys)
// through protobuf and verify storage info, keys, identity and software
// version are all preserved.
DatanodeID dnId=DFSTestUtil.getLocalDatanodeID();
BlockKey[] extraKeys=new BlockKey[]{getBlockKey(2),getBlockKey(3)};
ExportedBlockKeys exportedKeys=new ExportedBlockKeys(true,9,10,getBlockKey(1),extraKeys);
DatanodeRegistration before=new DatanodeRegistration(dnId,new StorageInfo(NodeType.DATA_NODE),exportedKeys,"3.0.0");
DatanodeRegistrationProto proto=PBHelper.convert(before);
DatanodeRegistration after=PBHelper.convert(proto);
compare(before.getStorageInfo(),after.getStorageInfo());
compare(before.getExportedKeys(),after.getExportedKeys());
compare(before,after);
assertEquals(before.getSoftwareVersion(),after.getSoftwareVersion());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
// Round-trips a BlockCommand (action, blocks, per-block target datanodes,
// storage types and storage ids) through protobuf, then compares the blocks
// and the full nested target arrays element by element.
@Test public void testConvertBlockCommand(){
Block[] blocks=new Block[]{new Block(21),new Block(22)};
// Jagged targets: block 0 has one target datanode, block 1 has two.
DatanodeInfo[][] dnInfos=new DatanodeInfo[][]{new DatanodeInfo[1],new DatanodeInfo[2]};
dnInfos[0][0]=DFSTestUtil.getLocalDatanodeInfo();
dnInfos[1][0]=DFSTestUtil.getLocalDatanodeInfo();
dnInfos[1][1]=DFSTestUtil.getLocalDatanodeInfo();
// Storage ids/types mirror the jagged shape of dnInfos.
String[][] storageIDs={{"s00"},{"s10","s11"}};
StorageType[][] storageTypes={{StorageType.DEFAULT},{StorageType.DEFAULT,StorageType.DEFAULT}};
BlockCommand bc=new BlockCommand(DatanodeProtocol.DNA_TRANSFER,"bp1",blocks,dnInfos,storageTypes,storageIDs);
BlockCommandProto bcProto=PBHelper.convert(bc);
BlockCommand bc2=PBHelper.convert(bcProto);
assertEquals(bc.getAction(),bc2.getAction());
assertEquals(bc.getBlocks().length,bc2.getBlocks().length);
Block[] blocks2=bc2.getBlocks();
for (int i=0; i < blocks.length; i++) {
assertEquals(blocks[i],blocks2[i]);
}
// Compare the nested target arrays: outer length, each inner length, and
// each datanode pairwise.
DatanodeInfo[][] dnInfos2=bc2.getTargets();
assertEquals(dnInfos.length,dnInfos2.length);
for (int i=0; i < dnInfos.length; i++) {
DatanodeInfo[] d1=dnInfos[i];
DatanodeInfo[] d2=dnInfos2[i];
assertEquals(d1.length,d2.length);
for (int j=0; j < d1.length; j++) {
compare(d1[j],d2[j]);
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testAclEntryProto(){
// Round-trip three ACL entries through protobuf. The test expects that an
// entry built without an explicit permission comes back with FsAction.NONE,
// so the third expected entry is rebuilt with NONE filled in.
AclEntry withAllFields=new AclEntry.Builder().setName("test").setPermission(FsAction.READ_EXECUTE).setScope(AclEntryScope.DEFAULT).setType(AclEntryType.OTHER).build();
AclEntry withoutName=new AclEntry.Builder().setScope(AclEntryScope.ACCESS).setType(AclEntryType.USER).setPermission(FsAction.ALL).build();
AclEntry withoutPermission=new AclEntry.Builder().setScope(AclEntryScope.ACCESS).setType(AclEntryType.USER).setName("test").build();
AclEntry[] expected=new AclEntry[]{withAllFields,withoutName,new AclEntry.Builder().setScope(withoutPermission.getScope()).setType(withoutPermission.getType()).setName(withoutPermission.getName()).setPermission(FsAction.NONE).build()};
AclEntry[] actual=Lists.newArrayList(PBHelper.convertAclEntry(PBHelper.convertAclEntryProto(Lists.newArrayList(withAllFields,withoutName,withoutPermission)))).toArray(new AclEntry[0]);
Assert.assertArrayEquals(expected,actual);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testConvertBlocksWithLocations(){
// Round-trip a BlocksWithLocations wrapper through protobuf and compare the
// contained blocks pairwise.
BlockWithLocations[] source=new BlockWithLocations[]{getBlockWithLocations(1),getBlockWithLocations(2)};
BlocksWithLocations wrapper=new BlocksWithLocations(source);
BlocksWithLocationsProto proto=PBHelper.convert(wrapper);
BlocksWithLocations restored=PBHelper.convert(proto);
BlockWithLocations[] beforeBlocks=wrapper.getBlocks();
BlockWithLocations[] afterBlocks=restored.getBlocks();
assertEquals(beforeBlocks.length,afterBlocks.length);
for (int idx=0; idx < beforeBlocks.length; idx++) {
compare(beforeBlocks[idx],afterBlocks[idx]);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testConvertBlockRecoveryCommand(){
// Convert a BlockRecoveryCommand to protobuf, check the encoded block ids
// directly on the proto, then convert back and verify the recovering blocks
// and the command's string form are preserved.
DatanodeInfo firstDn=DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo secondDn=DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo[] targets=new DatanodeInfo[]{firstDn,secondDn};
List blks=ImmutableList.of(new RecoveringBlock(getExtendedBlock(1),targets,3),new RecoveringBlock(getExtendedBlock(2),targets,3));
BlockRecoveryCommand before=new BlockRecoveryCommand(blks);
BlockRecoveryCommandProto proto=PBHelper.convert(before);
// The proto encodes the recovering blocks in order with ids 1 and 2.
assertEquals(1,proto.getBlocks(0).getBlock().getB().getBlockId());
assertEquals(2,proto.getBlocks(1).getBlock().getB().getBlockId());
BlockRecoveryCommand after=PBHelper.convert(proto);
List restoredBlks=Lists.newArrayList(after.getRecoveringBlocks());
assertEquals(blks.get(0).getBlock(),restoredBlks.get(0).getBlock());
assertEquals(blks.get(1).getBlock(),restoredBlks.get(1).getBlock());
assertEquals(Joiner.on(",").join(blks),Joiner.on(",").join(restoredBlks));
assertEquals(before.toString(),after.toString());
}
APIUtilityVerifier EqualityVerifier
@Test public void testConvertBlock(){
// A Block must equal itself after a protobuf round trip.
final Block original=new Block(1,100,3);
final BlockProto proto=PBHelper.convert(original);
final Block restored=PBHelper.convert(proto);
assertEquals(original,restored);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testStartStop() throws IOException {
// Bring up a MiniJournalCluster, sanity-check its quorum URI (three nodes)
// and the edits directory of the first journal node, then shut it down.
Configuration conf=new Configuration();
MiniJournalCluster journalCluster=new MiniJournalCluster.Builder(conf).build();
try {
URI quorumUri=journalCluster.getQuorumJournalURI("myjournal");
String[] hostPorts=quorumUri.getAuthority().split(";");
assertEquals(3,hostPorts.length);
JournalNode firstNode=journalCluster.getJournalNode(0);
String editsDir=firstNode.getConf().get(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY);
String expectedDir=new File(MiniDFSCluster.getBaseDirectory() + "journalnode-0").getAbsolutePath();
assertEquals(expectedDir,editsDir);
}
finally {
journalCluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
* Sets up two of the nodes to each drop a single RPC, at all
* possible combinations of RPCs. This may result in the
* active writer failing to write. After this point, a new writer
* should be able to recover and continue writing without
* data loss.
*/
// For every (failA, failB) pair, injects one RPC failure into each of two
// loggers, runs a workload that may lose its quorum, then verifies a fresh
// writer can recover at least up to the last acked transaction and keep
// writing. See the javadoc above for the failure model.
@Test public void testRecoverAfterDoubleFailures() throws Exception {
final long MAX_IPC_NUMBER=determineMaxIpcNumber();
for (int failA=1; failA <= MAX_IPC_NUMBER; failA++) {
for (int failB=1; failB <= MAX_IPC_NUMBER; failB++) {
String injectionStr="(" + failA + ", "+ failB+ ")";
LOG.info("\n\n-------------------------------------------\n" + "Beginning test, failing at " + injectionStr + "\n"+ "-------------------------------------------\n\n");
// Fresh cluster per combination so injected failures don't leak between runs.
MiniJournalCluster cluster=new MiniJournalCluster.Builder(conf).build();
QuorumJournalManager qjm=null;
try {
qjm=createInjectableQJM(cluster);
qjm.format(FAKE_NSINFO);
List loggers=qjm.getLoggerSetForTests().getLoggersForTests();
// Fail the failA-th IPC on logger 0 and the failB-th on logger 1.
failIpcNumber(loggers.get(0),failA);
failIpcNumber(loggers.get(1),failB);
int lastAckedTxn=doWorkload(cluster,qjm);
if (lastAckedTxn < 6) {
LOG.info("Failed after injecting failures at " + injectionStr + ". This is expected since we injected a failure in the "+ "majority.");
}
qjm.close();
qjm=null;
// A brand-new writer must recover everything that was acked, then be
// able to write a fresh segment.
qjm=createInjectableQJM(cluster);
long lastRecoveredTxn=QJMTestUtil.recoverAndReturnLastTxn(qjm);
assertTrue(lastRecoveredTxn >= lastAckedTxn);
writeSegment(cluster,qjm,lastRecoveredTxn + 1,3,true);
}
catch ( Throwable t) {
// Wrap so the failing (failA, failB) combination is visible in the report.
throw new RuntimeException("Test failed with injection: " + injectionStr,t);
}
finally {
cluster.shutdown();
cluster=null;
IOUtils.closeStream(qjm);
qjm=null;
}
}
}
}
APIUtilityVerifier IterativeVerifier BooleanVerifier
/**
* Test case in which three JournalNodes randomly flip flop between
* up and down states every time they get an RPC.
* The writer keeps track of the latest ACKed edit, and on every
* recovery operation, ensures that it recovers at least to that
* point or higher. Since at any given point, a majority of JNs
* may be injecting faults, any writer operation is allowed to fail,
* so long as the exception message indicates it failed due to injected
* faults.
* Given a random seed, the test should be entirely deterministic.
*/
// Randomized fault-injection run; see the javadoc above. Deterministic for a
// given seed, which can be pinned via the RAND_SEED_PROPERTY system property.
@Test public void testRandomized() throws Exception {
long seed;
Long userSpecifiedSeed=Long.getLong(RAND_SEED_PROPERTY);
if (userSpecifiedSeed != null) {
LOG.info("Using seed specified in system property");
seed=userSpecifiedSeed;
// Extra RPC logging when reproducing a specific seed.
((Log4JLogger)ProtobufRpcEngine.LOG).getLogger().setLevel(Level.ALL);
}
else {
seed=new Random().nextLong();
}
// Always log the seed so a failure can be reproduced.
LOG.info("Random seed: " + seed);
Random r=new Random(seed);
MiniJournalCluster cluster=new MiniJournalCluster.Builder(conf).build();
// Format through a non-faulty QJM before fault injection begins.
QuorumJournalManager qjmForInitialFormat=createInjectableQJM(cluster);
qjmForInitialFormat.format(FAKE_NSINFO);
qjmForInitialFormat.close();
try {
long txid=0;
long lastAcked=0;
for (int i=0; i < NUM_WRITER_ITERS; i++) {
LOG.info("Starting writer " + i + "\n-------------------");
QuorumJournalManager qjm=createRandomFaultyQJM(cluster,r);
try {
long recovered;
try {
recovered=QJMTestUtil.recoverAndReturnLastTxn(qjm);
}
catch ( Throwable t) {
// Recovery may fail due to injected faults; verify the failure is one
// of the injected kinds and move on to the next writer.
LOG.info("Failed recovery",t);
checkException(t);
continue;
}
// Core invariant: recovery never loses an acked transaction.
assertTrue("Recovered only up to txnid " + recovered + " but had gotten an ack for "+ lastAcked,recovered >= lastAcked);
txid=recovered + 1;
// Occasionally purge old logs to exercise that path under faults.
if (txid > 100 && i % 10 == 1) {
qjm.purgeLogsOlderThan(txid - 100);
}
Holder thrown=new Holder(null);
for (int j=0; j < SEGMENTS_PER_WRITER; j++) {
lastAcked=writeSegmentUntilCrash(cluster,qjm,txid,4,thrown);
if (thrown.held != null) {
LOG.info("Failed write",thrown.held);
checkException(thrown.held);
break;
}
txid+=4;
}
}
finally {
qjm.close();
}
}
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Exercises QuorumCall response counting and waitFor() semantics over three
// settable futures: successes and exceptions both count as responses, and
// waiting for more responses than will ever arrive must time out.
// NOTE(review): generic type parameters appear to have been stripped from
// this block (e.g. "Map>"); presumably Map<String, SettableFuture<String>>
// and QuorumCall<String, String> — confirm against the original source.
@Test(timeout=10000) public void testQuorums() throws Exception {
Map> futures=ImmutableMap.of("f1",SettableFuture.create(),"f2",SettableFuture.create(),"f3",SettableFuture.create());
QuorumCall q=QuorumCall.create(futures);
assertEquals(0,q.countResponses());
// One success: satisfies both "1 success" and "1 response" waits.
futures.get("f1").set("first future");
q.waitFor(1,0,0,100000,"test");
q.waitFor(0,1,0,100000,"test");
assertEquals(1,q.countResponses());
// An exception also counts as a response.
futures.get("f2").setException(new Exception("error"));
assertEquals(2,q.countResponses());
futures.get("f3").set("second future");
q.waitFor(3,0,100,100000,"test");
q.waitFor(0,2,100,100000,"test");
assertEquals(3,q.countResponses());
// Only the two successful results appear in getResults().
assertEquals("f1=first future,f3=second future",Joiner.on(",").withKeyValueSeparator("=").join(new TreeMap(q.getResults())));
try {
// Only 3 responses can ever arrive, so waiting for 4 must time out.
q.waitFor(0,4,100,10,"test");
fail("Didn't time out waiting for more responses than came back");
}
catch ( TimeoutException te) {
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies what a reader QJM can see while a separate writer QJM is active:
// nothing before any segment exists, only finalized segments while a new
// segment is in progress, and both segments once the second is finalized.
@Test public void testReaderWhileAnotherWrites() throws Exception {
QuorumJournalManager readerQjm=closeLater(createSpyingQJM());
List streams=Lists.newArrayList();
// No segments written yet: the reader sees no input streams.
readerQjm.selectInputStreams(streams,0,false);
assertEquals(0,streams.size());
// Write and finalize txns 1-3; the reader should now see exactly them.
writeSegment(cluster,qjm,1,3,true);
readerQjm.selectInputStreams(streams,0,false);
try {
assertEquals(1,streams.size());
EditLogInputStream stream=streams.get(0);
assertEquals(1,stream.getFirstTxId());
assertEquals(3,stream.getLastTxId());
verifyEdits(streams,1,3);
// The stream is exhausted after txn 3.
assertNull(stream.readOp());
}
finally {
IOUtils.cleanup(LOG,streams.toArray(new Closeable[0]));
streams.clear();
}
// Start (but do not finalize) txns 4-6: the in-progress segment must not be
// visible, so the reader still sees only 1-3.
writeSegment(cluster,qjm,4,3,false);
readerQjm.selectInputStreams(streams,0,false);
try {
assertEquals(1,streams.size());
EditLogInputStream stream=streams.get(0);
assertEquals(1,stream.getFirstTxId());
assertEquals(3,stream.getLastTxId());
verifyEdits(streams,1,3);
}
finally {
IOUtils.cleanup(LOG,streams.toArray(new Closeable[0]));
streams.clear();
}
// After finalizing 4-6, the reader sees both segments covering 1-6.
qjm.finalizeLogSegment(4,6);
readerQjm.selectInputStreams(streams,0,false);
try {
assertEquals(2,streams.size());
assertEquals(4,streams.get(1).getFirstTxId());
assertEquals(6,streams.get(1).getLastTxId());
verifyEdits(streams,1,6);
}
finally {
IOUtils.cleanup(LOG,streams.toArray(new Closeable[0]));
streams.clear();
}
}
APIUtilityVerifier BooleanVerifier
// Simulates a journal node crashing between syncing the recovered log and
// persisting its paxos data, across two failed recovery attempts, and then
// verifies a final recovery still reaches at least txn 4.
@Test(timeout=20000) public void testCrashBetweenSyncLogAndPersistPaxosData() throws Exception {
// Replace the fault injector with a mock so we can throw from
// beforePersistPaxosData() on demand.
JournalFaultInjector faultInjector=JournalFaultInjector.instance=Mockito.mock(JournalFaultInjector.class);
setupLoggers345();
qjm=createSpyingQJM();
spies=qjm.getLoggerSetForTests().getLoggersForTests();
// First attempt: node 2 down and node 1 failing acceptRecovery.
cluster.getJournalNode(2).stopAndJoin(0);
injectIOE().when(spies.get(1)).acceptRecovery(Mockito.any(),Mockito.any());
tryRecoveryExpectingFailure();
cluster.restartJournalNode(2);
// Second attempt: node 0 failing prepareRecovery, plus an injected crash
// before paxos data is persisted.
qjm=createSpyingQJM();
spies=qjm.getLoggerSetForTests().getLoggersForTests();
injectIOE().when(spies.get(0)).prepareRecovery(Mockito.eq(1L));
Mockito.doThrow(new IOException("Injected")).when(faultInjector).beforePersistPaxosData();
tryRecoveryExpectingFailure();
Mockito.reset(faultInjector);
// Final attempt with node 2 down again must still succeed and recover >= 4.
cluster.getJournalNode(2).stopAndJoin(0);
qjm=createSpyingQJM();
try {
long recovered=QJMTestUtil.recoverAndReturnLastTxn(qjm);
assertTrue(recovered >= 4);
}
finally {
qjm.close();
}
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
/**
* Test the case where one of the loggers misses a finalizeLogSegment()
* call, and then misses the next startLogSegment() call before coming
* back to life.
* Previously, this caused it to keep on writing to the old log segment,
* such that one logger had eg edits_1-10 while the others had edits_1-5 and
* edits_6-10. This caused recovery to fail in certain cases.
*/
// See javadoc above: logger 0 misses both finalizeLogSegment(1,3) and the
// following startLogSegment(4), logger 1 fails at txn 4, so the write at
// txn 4 cannot reach a quorum; recovery must then settle on txn 3.
@Test public void testMissFinalizeAndNextStart() throws Exception {
// Logger 0 misses the finalize call for segment 1-3...
futureThrows(new IOException("injected")).when(spies.get(0)).finalizeLogSegment(Mockito.eq(1L),Mockito.eq(3L));
// ...and also misses the subsequent startLogSegment(4).
futureThrows(new IOException("injected")).when(spies.get(0)).startLogSegment(Mockito.eq(4L),Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
failLoggerAtTxn(spies.get(1),4L);
writeSegment(cluster,qjm,1,3,true);
EditLogOutputStream stm=qjm.startLogSegment(4,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
try {
// With loggers 0 and 1 both failing, txn 4 cannot reach a quorum.
writeTxns(stm,4,1);
fail("Did not fail to write");
}
catch ( QuorumException qe) {
GenericTestUtils.assertExceptionContains("Writer out of sync",qe);
}
finally {
stm.abort();
qjm.close();
}
// With node 2 stopped, recovery must still converge on txn 3.
cluster.getJournalNode(2).stopAndJoin(0);
qjm=createSpyingQJM();
long recovered=QJMTestUtil.recoverAndReturnLastTxn(qjm);
assertEquals(3L,recovered);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Exercises the JournalNode's HTTP server: JMX metrics are served, a
// finalized segment can be fetched via /getJournal, and an unknown segment
// txid yields HTTP 404.
@Test(timeout=100000) public void testHttpServer() throws Exception {
String urlRoot=jn.getHttpServerURI();
// JMX endpoint must expose the JvmMetrics bean.
String pageContents=DFSTestUtil.urlGet(new URL(urlRoot + "/jmx"));
assertTrue("Bad contents: " + pageContents,pageContents.contains("Hadoop:service=JournalNode,name=JvmMetrics"));
// Write and finalize txns 1-3 through the IPC logger channel.
byte[] EDITS_DATA=QJMTestUtil.createTxnData(1,3);
IPCLoggerChannel ch=new IPCLoggerChannel(conf,FAKE_NSINFO,journalId,jn.getBoundIpcAddress());
ch.newEpoch(1).get();
ch.setEpoch(1);
ch.startLogSegment(1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
ch.sendEdits(1L,1,3,EDITS_DATA).get();
ch.finalizeLogSegment(1,3).get();
// The HTTP-served segment is the layout-version header, 4 zero bytes, then
// the raw edits data.
byte[] retrievedViaHttp=DFSTestUtil.urlGetBytes(new URL(urlRoot + "/getJournal?segmentTxId=1&jid=" + journalId));
byte[] expected=Bytes.concat(Ints.toByteArray(HdfsConstants.NAMENODE_LAYOUT_VERSION),(new byte[]{0,0,0,0}),EDITS_DATA);
assertArrayEquals(expected,retrievedViaHttp);
// A segment txid that was never written must yield 404.
URL badUrl=new URL(urlRoot + "/getJournal?segmentTxId=12345&jid=" + journalId);
HttpURLConnection connection=(HttpURLConnection)badUrl.openConnection();
try {
assertEquals(404,connection.getResponseCode());
}
finally {
connection.disconnect();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies the JournalNodeInfo MXBean's JournalsStatus attribute tracks the
// node's journals: empty before formatting, then reporting the formatted
// nameservice, both for the original node and for a rebuilt cluster.
// NOTE(review): generic type parameters appear stripped here (e.g. "Map>");
// presumably Map<String, Map<String, String>> — confirm against the
// original source.
@Test public void testJournalNodeMXBean() throws Exception {
MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName=new ObjectName("Hadoop:service=JournalNode,name=JournalNodeInfo");
// Before formatting, the status must not mention the nameservice.
String journalStatus=(String)mbs.getAttribute(mxbeanName,"JournalsStatus");
assertEquals(jn.getJournalsStatus(),journalStatus);
assertFalse(journalStatus.contains(NAMESERVICE));
// Format a journal for NAMESERVICE; the MXBean must now report it as
// formatted, matching the JSON the test builds by hand below.
final NamespaceInfo FAKE_NSINFO=new NamespaceInfo(12345,"mycluster","my-bp",0L);
jn.getOrCreateJournal(NAMESERVICE).format(FAKE_NSINFO);
journalStatus=(String)mbs.getAttribute(mxbeanName,"JournalsStatus");
assertEquals(jn.getJournalsStatus(),journalStatus);
Map> jMap=new HashMap>();
Map infoMap=new HashMap();
infoMap.put("Formatted","true");
jMap.put(NAMESERVICE,infoMap);
assertEquals(JSON.toString(jMap),journalStatus);
// Rebuild the cluster without reformatting; the status must persist.
jCluster=new MiniJournalCluster.Builder(new Configuration()).format(false).numJournalNodes(NUM_JN).build();
jn=jCluster.getJournalNode(0);
journalStatus=(String)mbs.getAttribute(mxbeanName,"JournalsStatus");
assertEquals(jn.getJournalsStatus(),journalStatus);
jMap=new HashMap>();
infoMap=new HashMap();
infoMap.put("Formatted","true");
jMap.put(NAMESERVICE,infoMap);
assertEquals(JSON.toString(jMap),journalStatus);
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier
// Verifies delegation-token handling over WebHDFS: the first
// addDelegationTokens() call fetches exactly one token into the credentials,
// and a second call returns nothing because the token is already cached.
// NOTE(review): generic parameters appear stripped here (e.g. "Token>");
// presumably Token<?> — confirm against the original source.
@Test public void testDelegationTokenWebHdfsApi() throws Exception {
((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
final String uri=WebHdfsFileSystem.SCHEME + "://" + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
// Obtain the filesystem as the test user "JobTracker".
final UserGroupInformation ugi=UserGroupInformation.createUserForTesting("JobTracker",new String[]{"user"});
final WebHdfsFileSystem webhdfs=ugi.doAs(new PrivilegedExceptionAction(){
@Override public WebHdfsFileSystem run() throws Exception {
return (WebHdfsFileSystem)FileSystem.get(new URI(uri),config);
}
}
);
{
Credentials creds=new Credentials();
// First call: exactly one token is fetched and stored in creds, and the
// returned array element is the same object stored in the credentials.
final Token> tokens[]=webhdfs.addDelegationTokens("JobTracker",creds);
Assert.assertEquals(1,tokens.length);
Assert.assertEquals(1,creds.numberOfTokens());
Assert.assertSame(tokens[0],creds.getAllTokens().iterator().next());
checkTokenIdentifier(ugi,tokens[0]);
// Second call: the credentials already hold the token, so nothing new.
final Token> tokens2[]=webhdfs.addDelegationTokens("JobTracker",creds);
Assert.assertEquals(0,tokens2.length);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies WebHDFS proxy-user (doAs) behavior: after swapping the filesystem's
// internal UGI to the proxy user, the home directory, file creation and
// append must all act as — and be owned by — PROXY_USER.
@Test(timeout=5000) public void testWebHdfsDoAs() throws Exception {
WebHdfsTestUtil.LOG.info("START: testWebHdfsDoAs()");
WebHdfsTestUtil.LOG.info("ugi.getShortUserName()=" + ugi.getShortUserName());
final WebHdfsFileSystem webhdfs=WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi,config,WebHdfsFileSystem.SCHEME);
final Path root=new Path("/");
// World-writable root so the proxy user can create files.
cluster.getFileSystem().setPermission(root,new FsPermission((short)0777));
// Swap the filesystem's internal UGI to the proxy user via reflection.
Whitebox.setInternalState(webhdfs,"ugi",proxyUgi);
{
// Home directory must resolve under the proxy user.
Path responsePath=webhdfs.getHomeDirectory();
WebHdfsTestUtil.LOG.info("responsePath=" + responsePath);
Assert.assertEquals(webhdfs.getUri() + "/user/" + PROXY_USER,responsePath.toString());
}
final Path f=new Path("/testWebHdfsDoAs/a.txt");
{
// A file created through the proxied filesystem is owned by PROXY_USER.
FSDataOutputStream out=webhdfs.create(f);
out.write("Hello, webhdfs user!".getBytes());
out.close();
final FileStatus status=webhdfs.getFileStatus(f);
WebHdfsTestUtil.LOG.info("status.getOwner()=" + status.getOwner());
Assert.assertEquals(PROXY_USER,status.getOwner());
}
{
// Appending keeps the proxy user as owner.
final FSDataOutputStream out=webhdfs.append(f);
out.write("\nHello again!".getBytes());
out.close();
final FileStatus status=webhdfs.getFileStatus(f);
WebHdfsTestUtil.LOG.info("status.getOwner()=" + status.getOwner());
WebHdfsTestUtil.LOG.info("status.getLen() =" + status.getLen());
Assert.assertEquals(PROXY_USER,status.getOwner());
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* This test writes a file and gets the block locations without closing the
* file, and tests the block token in the last block. Block token is verified
* by ensuring it is of correct kind.
* @throws IOException
* @throws InterruptedException
*/
// See javadoc above: writes to an open file and checks that the block token
// attached to the last located block has the expected kind.
@Test public void testBlockTokenInLastLocatedBlock() throws IOException, InterruptedException {
Configuration conf=new HdfsConfiguration();
// Enable block access tokens and use a small block size so the write spans
// multiple blocks.
conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY,true);
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,512);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
try {
FileSystem fs=cluster.getFileSystem();
String fileName="/testBlockTokenInLastLocatedBlock";
Path filePath=new Path(fileName);
FSDataOutputStream out=fs.create(filePath,(short)1);
out.write(new byte[1000]);
// The file is left open; poll until the namenode reports a last located
// block for the in-progress write.
LocatedBlocks locatedBlocks=cluster.getNameNodeRpc().getBlockLocations(fileName,0,1000);
while (locatedBlocks.getLastLocatedBlock() == null) {
Thread.sleep(100);
locatedBlocks=cluster.getNameNodeRpc().getBlockLocations(fileName,0,1000);
}
// The block token on the last block must be of the block-token kind.
Token token=locatedBlocks.getLastLocatedBlock().getBlockToken();
Assert.assertEquals(BlockTokenIdentifier.KIND_NAME,token.getKind());
out.close();
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
* Test that fast repeated invocations of createClientDatanodeProtocolProxy
* will not end up using up thousands of sockets. This is a regression test
* for HDFS-1965.
*/
// See javadoc above: regression test for HDFS-1965. Hammers
// createClientDatanodeProtocolProxy for ~3 seconds and asserts the number of
// open file descriptors does not grow by more than a small slack.
@Test public void testBlockTokenRpcLeak() throws Exception {
Configuration conf=new Configuration();
conf.set(HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(conf);
// fd counting reads /proc-style FD_DIR; skip on platforms without it.
Assume.assumeTrue(FD_DIR.exists());
BlockTokenSecretManager sm=new BlockTokenSecretManager(blockKeyUpdateInterval,blockTokenLifetime,0,"fake-pool",null);
Token token=sm.generateToken(block3,EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
final Server server=createMockDatanode(sm,token,conf);
server.start();
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
DatanodeID fakeDnId=DFSTestUtil.getLocalDatanodeID(addr.getPort());
ExtendedBlock b=new ExtendedBlock("fake-pool",new Block(12345L));
LocatedBlock fakeBlock=new LocatedBlock(b,new DatanodeInfo[0]);
fakeBlock.setBlockToken(token);
// A proxy to an unreachable address, kept open for the whole test so its
// descriptors are excluded from the leak measurement.
ClientDatanodeProtocol proxyToNoWhere=RPC.getProxy(ClientDatanodeProtocol.class,ClientDatanodeProtocol.versionID,new InetSocketAddress("1.1.1.1",1),UserGroupInformation.createRemoteUser("junk"),conf,NetUtils.getDefaultSocketFactory(conf));
ClientDatanodeProtocol proxy=null;
int fdsAtStart=countOpenFileDescriptors();
try {
long endTime=Time.now() + 3000;
// Repeatedly create, use and stop a proxy for ~3 seconds.
while (Time.now() < endTime) {
proxy=DFSUtil.createClientDatanodeProtocolProxy(fakeDnId,conf,1000,false,fakeBlock);
assertEquals(block3.getBlockId(),proxy.getReplicaVisibleLength(block3));
if (proxy != null) {
RPC.stopProxy(proxy);
}
LOG.info("Num open fds:" + countOpenFileDescriptors());
}
// Allow some slack: other threads may open descriptors concurrently.
int fdsAtEnd=countOpenFileDescriptors();
if (fdsAtEnd - fdsAtStart > 50) {
fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
}
}
finally {
server.stop();
}
RPC.stopProxy(proxyToNoWhere);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that a client can talk to a mock datanode over RPC using a block
 * token as its sole credential: the call must authenticate and return the
 * expected replica length for block3.
 */
@Test public void testBlockTokenRpc() throws Exception {
  Configuration clientConf=new Configuration();
  clientConf.set(HADOOP_SECURITY_AUTHENTICATION,"kerberos");
  UserGroupInformation.setConfiguration(clientConf);
  BlockTokenSecretManager secretManager=new BlockTokenSecretManager(
      blockKeyUpdateInterval,blockTokenLifetime,0,"fake-pool",null);
  Token blockToken=secretManager.generateToken(block3,
      EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
  final Server mockDatanode=createMockDatanode(secretManager,blockToken,clientConf);
  mockDatanode.start();
  final InetSocketAddress serverAddr=NetUtils.getConnectAddress(mockDatanode);
  // The remote user carries only the block token as its credential.
  final UserGroupInformation ugi=UserGroupInformation.createRemoteUser(block3.toString());
  ugi.addToken(blockToken);
  ClientDatanodeProtocol datanodeProxy=null;
  try {
    datanodeProxy=DFSUtil.createClientDatanodeProtocolProxy(serverAddr,ugi,
        clientConf,NetUtils.getDefaultSocketFactory(clientConf));
    assertEquals(block3.getBlockId(),datanodeProxy.getReplicaVisibleLength(block3));
  } finally {
    mockDatanode.stop();
    if (datanodeProxy != null) {
      RPC.stopProxy(datanodeProxy);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Runs the balancer with one datanode's hostname listed in the Parameters'
 * datanode set and expects a SUCCESS exit code.
 * NOTE(review): the third Parameters argument is presumably the set of
 * nodes to exclude from balancing — confirm against Balancer.Parameters.
 */
@Test(timeout=100000) public void testUnknownDatanode() throws Exception {
Configuration conf=new HdfsConfiguration();
initConf(conf);
// Three simulated datanodes with uneven usage: 50%, 70% and 0% of CAPACITY.
long distribution[]=new long[]{50 * CAPACITY / 100,70 * CAPACITY / 100,0 * CAPACITY / 100};
long capacities[]=new long[]{CAPACITY,CAPACITY,CAPACITY};
String racks[]=new String[]{RACK0,RACK1,RACK1};
int numDatanodes=distribution.length;
// Sanity check: the parallel arrays must describe the same node count.
if (capacities.length != numDatanodes || racks.length != numDatanodes) {
throw new IllegalArgumentException("Array length is not the same");
}
final long totalUsedSpace=sum(distribution);
ExtendedBlock[] blocks=generateBlocks(conf,totalUsedSpace,(short)numDatanodes);
// Distribute the generated blocks over the first (numDatanodes - 1) nodes.
Block[][] blocksDN=distributeBlocks(blocks,(short)(numDatanodes - 1),distribution);
// Zero safemode threshold so the NN leaves safemode immediately.
conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,"0.0f");
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).racks(racks).simulatedCapacities(capacities).build();
try {
cluster.waitActive();
client=NameNodeProxies.createProxy(conf,cluster.getFileSystem(0).getUri(),ClientProtocol.class).getProxy();
// Inject the pre-built block lists directly into the datanodes.
for (int i=0; i < 3; i++) {
cluster.injectBlocks(i,Arrays.asList(blocksDN[i]),null);
}
// Add one more (empty) datanode on RACK0 and let the NN hear from it.
cluster.startDataNodes(conf,1,true,null,new String[]{RACK0},null,new long[]{CAPACITY});
cluster.triggerHeartbeats();
// NOTE(review): raw Collection/Set — element types not visible from here.
Collection namenodes=DFSUtil.getNsServiceRpcUris(conf);
Set datanodes=new HashSet();
datanodes.add(cluster.getDataNodes().get(0).getDatanodeId().getHostName());
Balancer.Parameters p=new Balancer.Parameters(Balancer.Parameters.DEFAULT.policy,Balancer.Parameters.DEFAULT.threshold,datanodes,Balancer.Parameters.DEFAULT.nodesToBeIncluded);
final int r=Balancer.run(namenodes,p,conf);
assertEquals(ExitStatus.SUCCESS.getExitCode(),r);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test a cluster with even distribution, then a new empty node is added to
 * the cluster. Test start a cluster with specified number of nodes, and fills
 * it to be 30% full (with a single file replicated identically to all
 * datanodes); It then adds one new empty node and starts balancing.
 */
@Test(timeout=60000) public void testBalancerWithHANameNodes() throws Exception {
  Configuration conf=new HdfsConfiguration();
  TestBalancer.initConf(conf);
  long newNodeCapacity=TestBalancer.CAPACITY;
  String newNodeRack=TestBalancer.RACK2;
  String[] racks=new String[]{TestBalancer.RACK0,TestBalancer.RACK1};
  long[] capacities=new long[]{TestBalancer.CAPACITY,TestBalancer.CAPACITY};
  assertEquals(capacities.length,racks.length);
  int numOfDatanodes=capacities.length;
  // FIX: removed an unused MiniDFSNNTopology.NNConf local ("nn1") that was
  // created and had its IPC port set but was never passed to the cluster
  // builder — pure dead code.
  Configuration copiedConf=new Configuration(conf);
  cluster=new MiniDFSCluster.Builder(copiedConf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(capacities.length).racks(racks).simulatedCapacities(capacities).build();
  HATestUtil.setFailoverConfigurations(cluster,conf);
  try {
    cluster.waitActive();
    cluster.transitionToActive(1);
    Thread.sleep(500);
    client=NameNodeProxies.createProxy(conf,FileSystem.getDefaultUri(conf),ClientProtocol.class).getProxy();
    long totalCapacity=TestBalancer.sum(capacities);
    // Fill the cluster to 30% so the new empty node is clearly under-used.
    long totalUsedSpace=totalCapacity * 3 / 10;
    TestBalancer.createFile(cluster,TestBalancer.filePath,totalUsedSpace / numOfDatanodes,(short)numOfDatanodes,1);
    cluster.startDataNodes(conf,1,true,null,new String[]{newNodeRack},new long[]{newNodeCapacity});
    totalCapacity+=newNodeCapacity;
    TestBalancer.waitForHeartBeat(totalUsedSpace,totalCapacity,client,cluster);
    // With HA configured there must be exactly one logical namenode URI.
    Collection namenodes=DFSUtil.getNsServiceRpcUris(conf);
    assertEquals(1,namenodes.size());
    assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
    final int r=Balancer.run(namenodes,Balancer.Parameters.DEFAULT,conf);
    assertEquals(ExitStatus.SUCCESS.getExitCode(),r);
    TestBalancer.waitForBalancer(totalUsedSpace,totalCapacity,client,cluster,Balancer.Parameters.DEFAULT);
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier
/**
 * Create a cluster with even distribution, and a new empty node is added to
 * the cluster, then test rack locality for balancer policy.
 */
@Test(timeout=60000) public void testBalancerWithRackLocality() throws Exception {
Configuration conf=createConf();
long[] capacities=new long[]{CAPACITY,CAPACITY};
String[] racks=new String[]{RACK0,RACK1};
String[] nodeGroups=new String[]{NODEGROUP0,NODEGROUP1};
int numOfDatanodes=capacities.length;
assertEquals(numOfDatanodes,racks.length);
MiniDFSCluster.Builder builder=new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length).racks(racks).simulatedCapacities(capacities);
// Node groups must be registered before the cluster is constructed.
MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
cluster=new MiniDFSClusterWithNodeGroup(builder);
try {
cluster.waitActive();
client=NameNodeProxies.createProxy(conf,cluster.getFileSystem(0).getUri(),ClientProtocol.class).getProxy();
long totalCapacity=TestBalancer.sum(capacities);
// Fill the cluster to 30% with one file replicated to every datanode.
long totalUsedSpace=totalCapacity * 3 / 10;
long length=totalUsedSpace / numOfDatanodes;
TestBalancer.createFile(cluster,filePath,length,(short)numOfDatanodes,0);
LocatedBlocks lbs=client.getBlockLocations(filePath.toUri().getPath(),0,length);
// Snapshot which blocks live on RACK0 before balancing.
// NOTE(review): raw Set — element type not visible from here.
Set before=getBlocksOnRack(lbs.getLocatedBlocks(),RACK0);
long newCapacity=CAPACITY;
String newRack=RACK1;
String newNodeGroup=NODEGROUP2;
// Add one empty node on the existing RACK1 but in a brand-new node group.
cluster.startDataNodes(conf,1,true,null,new String[]{newRack},new long[]{newCapacity},new String[]{newNodeGroup});
totalCapacity+=newCapacity;
runBalancerCanFinish(conf,totalUsedSpace,totalCapacity);
lbs=client.getBlockLocations(filePath.toUri().getPath(),0,length);
Set after=getBlocksOnRack(lbs.getLocatedBlocks(),RACK0);
// Balancing onto the RACK1 node must not have moved replicas off RACK0.
assertEquals(before,after);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises DatanodeStorageInfo's intrusive block list: builds a list of
 * MAX_BLOCKS blocks, then verifies moveBlockToHead() when moving every
 * element in order, when moving the current head onto itself, and when
 * moving randomly chosen elements.
 */
@Test public void testBlockListMoveToHead() throws Exception {
LOG.info("BlockInfo moveToHead tests...");
final int MAX_BLOCKS=10;
DatanodeStorageInfo dd=DFSTestUtil.createDatanodeStorageInfo("s1","1.1.1.1");
// NOTE(review): raw ArrayLists — elements are Block and BlockInfo per the
// add() calls below.
ArrayList blockList=new ArrayList(MAX_BLOCKS);
ArrayList blockInfoList=new ArrayList();
int headIndex;
int curIndex;
LOG.info("Building block list...");
for (int i=0; i < MAX_BLOCKS; i++) {
blockList.add(new Block(i,0,GenerationStamp.LAST_RESERVED_STAMP));
blockInfoList.add(new BlockInfo(blockList.get(i),3));
dd.addBlock(blockInfoList.get(i));
// Every added block must report this storage at index 0.
assertEquals("Find datanode should be 0",0,blockInfoList.get(i).findStorageInfo(dd));
}
LOG.info("Checking list length...");
assertEquals("Length should be MAX_BLOCK",MAX_BLOCKS,dd.numBlocks());
// Walk the iterator to confirm it yields exactly MAX_BLOCKS entries.
Iterator it=dd.getBlockIterator();
int len=0;
while (it.hasNext()) {
it.next();
len++;
}
assertEquals("There should be MAX_BLOCK blockInfo's",MAX_BLOCKS,len);
headIndex=dd.getBlockListHeadForTesting().findStorageInfo(dd);
LOG.info("Moving each block to the head of the list...");
for (int i=0; i < MAX_BLOCKS; i++) {
curIndex=blockInfoList.get(i).findStorageInfo(dd);
headIndex=dd.moveBlockToHead(blockInfoList.get(i),curIndex,headIndex);
assertEquals("Block should be at the head of the list now.",blockInfoList.get(i),dd.getBlockListHeadForTesting());
}
LOG.info("Moving head to the head...");
// Degenerate case: moving the head onto itself must be a no-op.
BlockInfo temp=dd.getBlockListHeadForTesting();
curIndex=0;
headIndex=0;
dd.moveBlockToHead(temp,curIndex,headIndex);
assertEquals("Moving head to the head of the list shopuld not change the list",temp,dd.getBlockListHeadForTesting());
LOG.info("Checking elements of the list...");
temp=dd.getBlockListHeadForTesting();
assertNotNull("Head should not be null",temp);
// After moving elements 0..MAX_BLOCKS-1 to the head in order, the list
// reads back in reverse insertion order.
int c=MAX_BLOCKS - 1;
while (temp != null) {
assertEquals("Expected element is not on the list",blockInfoList.get(c--),temp);
temp=temp.getNext(0);
}
LOG.info("Moving random blocks to the head of the list...");
headIndex=dd.getBlockListHeadForTesting().findStorageInfo(dd);
Random rand=new Random();
for (int i=0; i < MAX_BLOCKS; i++) {
int j=rand.nextInt(MAX_BLOCKS);
curIndex=blockInfoList.get(j).findStorageInfo(dd);
headIndex=dd.moveBlockToHead(blockInfoList.get(j),curIndex,headIndex);
assertEquals("Block should be at the head of the list now.",blockInfoList.get(j),dd.getBlockListHeadForTesting());
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * While the namesystem claims startup safemode, the first full block report
 * for a storage bumps its report count to 1 and a repeated report leaves it
 * at 1; removing and re-registering the datanode resets the count to 0 so
 * the next report is processed again.
 */
@Test public void testSafeModeIBR() throws Exception {
DatanodeDescriptor node=spy(nodes.get(0));
DatanodeStorageInfo ds=node.getStorageInfos()[0];
node.setDatanodeUuidForTesting(ds.getStorageID());
node.isAlive=true;
DatanodeRegistration nodeReg=new DatanodeRegistration(node,null,null,"");
// Force the (mocked) namesystem to report startup safemode.
doReturn(true).when(fsn).isInStartupSafeMode();
bm.getDatanodeManager().registerDatanode(nodeReg);
bm.getDatanodeManager().addDatanode(node);
assertEquals(node,bm.getDatanodeManager().getDatanode(node));
assertEquals(0,ds.getBlockReportCount());
reset(node);
// First (empty) full report: counted.
bm.processReport(node,new DatanodeStorage(ds.getStorageID()),new BlockListAsLongs(null,null));
assertEquals(1,ds.getBlockReportCount());
reset(node);
// Repeated report: count must not advance past 1.
bm.processReport(node,new DatanodeStorage(ds.getStorageID()),new BlockListAsLongs(null,null));
assertEquals(1,ds.getBlockReportCount());
// Remove and re-register: registration resets the report count to 0.
bm.getDatanodeManager().removeDatanode(node);
reset(node);
bm.getDatanodeManager().registerDatanode(nodeReg);
verify(node).updateRegInfo(nodeReg);
assertEquals(0,ds.getBlockReportCount());
reset(node);
bm.processReport(node,new DatanodeStorage(ds.getStorageID()),new BlockListAsLongs(null,null));
assertEquals(1,ds.getBlockReportCount());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Variant of testSafeModeIBR where the datanode already claims to hold a
 * block (numBlocks() stubbed to 1): the first full block report during
 * startup safemode must still be processed and counted.
 */
@Test public void testSafeModeIBRAfterIncremental() throws Exception {
DatanodeDescriptor node=spy(nodes.get(0));
DatanodeStorageInfo ds=node.getStorageInfos()[0];
node.setDatanodeUuidForTesting(ds.getStorageID());
node.isAlive=true;
DatanodeRegistration nodeReg=new DatanodeRegistration(node,null,null,"");
// Force the (mocked) namesystem to report startup safemode.
doReturn(true).when(fsn).isInStartupSafeMode();
bm.getDatanodeManager().registerDatanode(nodeReg);
bm.getDatanodeManager().addDatanode(node);
assertEquals(node,bm.getDatanodeManager().getDatanode(node));
assertEquals(0,ds.getBlockReportCount());
reset(node);
// Pretend the node already received a block (e.g. via an incremental
// block report) before the first full report arrives.
doReturn(1).when(node).numBlocks();
bm.processReport(node,new DatanodeStorage(ds.getStorageID()),new BlockListAsLongs(null,null));
assertEquals(1,ds.getBlockReportCount());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end read scenarios under block tokens: valid token, expired token,
 * regenerated token, token for the wrong block, token without READ access,
 * plus reads while the namenode is down and across datanode restarts.
 */
@Test public void testRead() throws Exception {
MiniDFSCluster cluster=null;
int numDataNodes=2;
Configuration conf=getConf(numDataNodes);
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
assertEquals(numDataNodes,cluster.getDataNodes().size());
final NameNode nn=cluster.getNameNode();
final NamenodeProtocols nnProto=nn.getRpcServer();
final BlockManager bm=nn.getNamesystem().getBlockManager();
final BlockTokenSecretManager sm=bm.getBlockTokenSecretManager();
// Shrink token lifetime to 1s so expiry can be observed quickly.
SecurityTestUtil.setBlockTokenLifetime(sm,1000L);
Path fileToRead=new Path(FILE_TO_READ);
FileSystem fs=cluster.getFileSystem();
createFile(fs,fileToRead);
// Three independent input streams over the same file; each is re-checked
// after the token/NN/DN state changes below.
FSDataInputStream in1=fs.open(fileToRead);
assertTrue(checkFile1(in1));
FSDataInputStream in2=fs.open(fileToRead);
assertTrue(checkFile1(in2));
FSDataInputStream in3=fs.open(fileToRead);
assertTrue(checkFile2(in3));
// Constructing (and closing) a DFSClient must work with tokens enabled.
DFSClient client=null;
try {
client=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf);
}
finally {
if (client != null) client.close();
}
// Fetch the first block's token directly from the NN.
List locatedBlocks=nnProto.getBlockLocations(FILE_TO_READ,0,FILE_SIZE).getLocatedBlocks();
LocatedBlock lblock=locatedBlocks.get(0);
Token myToken=lblock.getBlockToken();
// Fresh token: read must succeed.
assertFalse(SecurityTestUtil.isBlockTokenExpired(myToken));
tryRead(conf,lblock,true);
// Busy-wait until the token expires, then the same read must fail.
while (!SecurityTestUtil.isBlockTokenExpired(myToken)) {
try {
Thread.sleep(10);
}
catch ( InterruptedException ignored) {
}
}
assertTrue(SecurityTestUtil.isBlockTokenExpired(myToken));
tryRead(conf,lblock,false);
// A newly generated READ token works again.
lblock.setBlockToken(sm.generateToken(lblock.getBlock(),EnumSet.of(BlockTokenSecretManager.AccessMode.READ)));
tryRead(conf,lblock,true);
// A token generated for a different block id must be rejected.
ExtendedBlock wrongBlock=new ExtendedBlock(lblock.getBlock().getBlockPoolId(),lblock.getBlock().getBlockId() + 1);
lblock.setBlockToken(sm.generateToken(wrongBlock,EnumSet.of(BlockTokenSecretManager.AccessMode.READ)));
tryRead(conf,lblock,false);
// A token without READ access (WRITE/COPY/REPLACE only) must be rejected.
lblock.setBlockToken(sm.generateToken(lblock.getBlock(),EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE,BlockTokenSecretManager.AccessMode.COPY,BlockTokenSecretManager.AccessMode.REPLACE)));
tryRead(conf,lblock,false);
// Restore a long lifetime (10 min) for the rest of the scenario.
SecurityTestUtil.setBlockTokenLifetime(sm,600 * 1000L);
// The streams' cached tokens (issued under the 1s lifetime) are expired,
// yet re-reads succeed — presumably the client fetches fresh tokens from
// the NN on retry; confirm against DFSInputStream behavior.
List lblocks=DFSTestUtil.getAllBlocks(in1);
for ( LocatedBlock blk : lblocks) {
assertTrue(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
}
in1.seek(0);
assertTrue(checkFile1(in1));
List lblocks2=DFSTestUtil.getAllBlocks(in2);
for ( LocatedBlock blk : lblocks2) {
assertTrue(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
}
assertTrue(in2.seekToNewSource(0));
assertTrue(checkFile1(in2));
List lblocks3=DFSTestUtil.getAllBlocks(in3);
for ( LocatedBlock blk : lblocks3) {
assertTrue(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
}
assertTrue(checkFile2(in3));
// Restart datanodes on the same ports, then take the NN down: reads must
// still succeed using the streams' (now unexpired) cached tokens.
assertTrue(cluster.restartDataNodes(true));
cluster.waitActive();
assertEquals(numDataNodes,cluster.getDataNodes().size());
cluster.shutdownNameNode(0);
lblocks=DFSTestUtil.getAllBlocks(in1);
for ( LocatedBlock blk : lblocks) {
assertFalse(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
}
in1.seek(0);
assertTrue(checkFile1(in1));
lblocks2=DFSTestUtil.getAllBlocks(in2);
for ( LocatedBlock blk : lblocks2) {
assertFalse(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
}
in2.seekToNewSource(0);
assertTrue(checkFile1(in2));
lblocks3=DFSTestUtil.getAllBlocks(in3);
for ( LocatedBlock blk : lblocks3) {
assertFalse(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
}
assertTrue(checkFile2(in3));
// Bounce the NN (up then down): reads with cached tokens still succeed.
cluster.restartNameNode(0);
cluster.shutdownNameNode(0);
in1.seek(0);
assertTrue(checkFile1(in1));
in2.seekToNewSource(0);
assertTrue(checkFile1(in2));
assertTrue(checkFile2(in3));
// Restart NN and DNs (same ports), then take the NN down again: now the
// reads are expected to FAIL — presumably the DN restart invalidated the
// cached tokens and no NN is available to issue new ones; confirm.
cluster.restartNameNode(0);
assertTrue(cluster.restartDataNodes(true));
cluster.waitActive();
assertEquals(numDataNodes,cluster.getDataNodes().size());
cluster.shutdownNameNode(0);
in1.seek(0);
assertFalse(checkFile1(in1));
assertFalse(checkFile2(in3));
// With the NN back, the same reads recover.
cluster.restartNameNode(0);
in1.seek(0);
assertTrue(checkFile1(in1));
in2.seekToNewSource(0);
assertTrue(checkFile1(in2));
assertTrue(checkFile2(in3));
// Restart datanodes on NEW ports; reads must still recover.
assertTrue(cluster.restartDataNodes(false));
cluster.waitActive();
assertEquals(numDataNodes,cluster.getDataNodes().size());
in1.seek(0);
assertTrue(checkFile1(in1));
in2.seekToNewSource(0);
assertTrue(checkFile1(in2));
assertTrue(checkFile2(in3));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * testing that APPEND operation can handle token expiration when
 * re-establishing pipeline is needed
 */
@Test public void testAppend() throws Exception {
MiniDFSCluster cluster=null;
int numDataNodes=2;
Configuration conf=getConf(numDataNodes);
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
assertEquals(numDataNodes,cluster.getDataNodes().size());
final NameNode nn=cluster.getNameNode();
final BlockManager bm=nn.getNamesystem().getBlockManager();
final BlockTokenSecretManager sm=bm.getBlockTokenSecretManager();
// One-second token lifetime so the token expires mid-append.
SecurityTestUtil.setBlockTokenLifetime(sm,1000L);
Path fileToAppend=new Path(FILE_TO_APPEND);
FileSystem fs=cluster.getFileSystem();
// Write the first byte and close, then append most of the rest.
FSDataOutputStream stm=writeFile(fs,fileToAppend,(short)numDataNodes,BLOCK_SIZE);
stm.write(rawData,0,1);
stm.close();
stm=fs.append(fileToAppend);
int mid=rawData.length - 1;
stm.write(rawData,1,mid - 1);
stm.hflush();
Token token=DFSTestUtil.getBlockToken(stm);
// Busy-wait (10ms steps) until the stream's block token has expired.
while (!SecurityTestUtil.isBlockTokenExpired(token)) {
try {
Thread.sleep(10);
}
catch ( InterruptedException ignored) {
}
}
// Killing a datanode forces pipeline recovery, which must cope with the
// expired token when writing the final bytes.
cluster.stopDataNode(0);
stm.write(rawData,mid,rawData.length - mid);
stm.close();
// The completed file must read back as the full rawData payload.
FSDataInputStream in5=fs.open(fileToAppend);
assertTrue(checkFile1(in5));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * testing that WRITE operation can handle token expiration when
 * re-establishing pipeline is needed
 */
@Test public void testWrite() throws Exception {
  MiniDFSCluster dfsCluster=null;
  int dnCount=2;
  Configuration conf=getConf(dnCount);
  try {
    dfsCluster=new MiniDFSCluster.Builder(conf).numDataNodes(dnCount).build();
    dfsCluster.waitActive();
    assertEquals(dnCount,dfsCluster.getDataNodes().size());
    final NameNode nameNode=dfsCluster.getNameNode();
    final BlockManager blockManager=nameNode.getNamesystem().getBlockManager();
    final BlockTokenSecretManager secretManager=blockManager.getBlockTokenSecretManager();
    // Shrink the token lifetime to one second so expiry happens mid-write.
    SecurityTestUtil.setBlockTokenLifetime(secretManager,1000L);
    Path fileToWrite=new Path(FILE_TO_WRITE);
    FileSystem fileSys=dfsCluster.getFileSystem();
    FSDataOutputStream out=writeFile(fileSys,fileToWrite,(short)dnCount,BLOCK_SIZE);
    // Write everything except the last byte, then flush to the pipeline.
    int splitPoint=rawData.length - 1;
    out.write(rawData,0,splitPoint);
    out.hflush();
    Token blockToken=DFSTestUtil.getBlockToken(out);
    // Busy-wait (10ms steps) until the stream's block token expires.
    while (!SecurityTestUtil.isBlockTokenExpired(blockToken)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
      }
    }
    // Killing a datanode forces pipeline recovery, which must cope with
    // the expired token while the remaining bytes are written.
    dfsCluster.stopDataNode(0);
    out.write(rawData,splitPoint,rawData.length - splitPoint);
    out.close();
    // The finished file must read back as the full rawData payload.
    FSDataInputStream verifyIn=fileSys.open(fileToWrite);
    assertTrue(checkFile1(verifyIn));
  } finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * After one replica of a 2-replica file is corrupted on disk, the namenode
 * should restore two replicas on two racks, and every readable replica
 * other than the corrupted one must match the original file content.
 */
@Test public void testCorruptBlockRereplicatedAcrossRacks() throws Exception {
Configuration conf=getConf();
short REPLICATION_FACTOR=2;
int fileLen=512;
final Path filePath=new Path("/testFile");
String racks[]={"/rack1","/rack1","/rack2","/rack2"};
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns=cluster.getNameNode().getNamesystem();
try {
final FileSystem fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,filePath,fileLen,REPLICATION_FACTOR,1L);
final String fileContent=DFSTestUtil.readFile(fs,filePath);
ExtendedBlock b=DFSTestUtil.getFirstBlock(fs,filePath);
// Wait for 2 replicas spread across 2 racks.
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
// Corrupt one on-disk replica and restart its datanode so the corruption
// gets noticed by the namenode.
int dnToCorrupt=DFSTestUtil.firstDnWithBlock(cluster,b);
assertTrue(MiniDFSCluster.corruptReplica(dnToCorrupt,b));
cluster.restartDataNode(dnToCorrupt);
DFSTestUtil.waitCorruptReplicas(fs,ns,filePath,b,1);
// Re-replication must again reach 2 replicas on 2 racks.
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
// Every readable replica on a node other than the corrupted one must be
// byte-identical to the original file content.
for (int i=0; i < racks.length; i++) {
String blockContent=cluster.readBlockOnDataNode(i,b);
if (blockContent != null && i != dnToCorrupt) {
assertEquals("Corrupt replica",fileContent,blockContent);
}
}
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Decommissioning a node while the block is over-replicated must still
 * leave the surviving replicas spread across racks.
 */
@Test public void testNodeDecomissionWithOverreplicationRespectsRackPolicy() throws Exception {
Configuration conf=getConf();
short REPLICATION_FACTOR=5;
final Path filePath=new Path("/testFile");
// Host include/exclude files backing DFS_HOSTS / DFS_HOSTS_EXCLUDE.
FileSystem localFileSys=FileSystem.getLocal(conf);
Path workingDir=localFileSys.getWorkingDirectory();
Path dir=new Path(workingDir,"build/test/data/temp/decommission");
Path excludeFile=new Path(dir,"exclude");
Path includeFile=new Path(dir,"include");
assertTrue(localFileSys.mkdirs(dir));
DFSTestUtil.writeFile(localFileSys,excludeFile,"");
DFSTestUtil.writeFile(localFileSys,includeFile,"");
conf.set(DFSConfigKeys.DFS_HOSTS,includeFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE,excludeFile.toUri().getPath());
// Four nodes on /rack1, only one on /rack2.
String racks[]={"/rack1","/rack2","/rack1","/rack1","/rack1"};
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns=cluster.getNameNode().getNamesystem();
try {
final FileSystem fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,filePath,1L,REPLICATION_FACTOR,1L);
ExtendedBlock b=DFSTestUtil.getFirstBlock(fs,filePath);
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
// Make the block over-replicated by dropping the target to 2.
REPLICATION_FACTOR=2;
fs.setReplication(filePath,REPLICATION_FACTOR);
BlockLocation locs[]=fs.getFileBlockLocations(fs.getFileStatus(filePath),0,Long.MAX_VALUE);
// Decommission the first replica holder NOT on /rack2, so the lone
// /rack2 replica must be kept to satisfy the rack policy.
for ( String top : locs[0].getTopologyPaths()) {
if (!top.startsWith("/rack2")) {
// Strip the "/rack1/" prefix to get the datanode name.
String name=top.substring("/rack1".length() + 1);
DFSTestUtil.writeFile(localFileSys,excludeFile,name);
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
DFSTestUtil.waitForDecommission(fs,name);
break;
}
}
// After decommission: 2 replicas still spanning 2 racks.
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Decommissioning one replica holder of a 2-replica block must trigger
 * re-replication that again places the block on two racks.
 */
@Test public void testNodeDecomissionRespectsRackPolicy() throws Exception {
Configuration conf=getConf();
short REPLICATION_FACTOR=2;
final Path filePath=new Path("/testFile");
// Host include/exclude files backing DFS_HOSTS / DFS_HOSTS_EXCLUDE.
FileSystem localFileSys=FileSystem.getLocal(conf);
Path workingDir=localFileSys.getWorkingDirectory();
Path dir=new Path(workingDir,"build/test/data/temp/decommission");
Path excludeFile=new Path(dir,"exclude");
Path includeFile=new Path(dir,"include");
assertTrue(localFileSys.mkdirs(dir));
DFSTestUtil.writeFile(localFileSys,excludeFile,"");
DFSTestUtil.writeFile(localFileSys,includeFile,"");
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE,excludeFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS,includeFile.toUri().getPath());
String racks[]={"/rack1","/rack1","/rack2","/rack2"};
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns=cluster.getNameNode().getNamesystem();
try {
final FileSystem fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,filePath,1L,REPLICATION_FACTOR,1L);
ExtendedBlock b=DFSTestUtil.getFirstBlock(fs,filePath);
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
// Decommission the first replica holder via the exclude file.
BlockLocation locs[]=fs.getFileBlockLocations(fs.getFileStatus(filePath),0,Long.MAX_VALUE);
String name=locs[0].getNames()[0];
DFSTestUtil.writeFile(localFileSys,excludeFile,name);
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
DFSTestUtil.waitForDecommission(fs,name);
// The replacement replica must restore the two-rack placement.
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier
/**
 * When the only /rack2 node is removed the block becomes single-rack; once
 * a new /rack2 node joins, the namenode must restore a two-rack placement
 * for the block.
 * NOTE(review): the waitForReplication arguments are presumably
 * (cluster, block, racks, replicas, neededReplications) — confirm.
 */
@Test public void testReduceReplFactorDueToRejoinRespectsRackPolicy() throws Exception {
Configuration conf=getConf();
short REPLICATION_FACTOR=2;
final Path filePath=new Path("/testFile");
String racks[]={"/rack1","/rack1","/rack2"};
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns=cluster.getNameNode().getNamesystem();
final DatanodeManager dm=ns.getBlockManager().getDatanodeManager();
try {
final FileSystem fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,filePath,1L,REPLICATION_FACTOR,1L);
ExtendedBlock b=DFSTestUtil.getFirstBlock(fs,filePath);
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
// Kill the only /rack2 datanode and drop it from the datanode manager so
// the namenode sees the block on a single rack.
ArrayList datanodes=cluster.getDataNodes();
assertEquals(3,datanodes.size());
DataNode dataNode=datanodes.get(2);
DatanodeID dnId=dataNode.getDatanodeId();
cluster.stopDataNode(2);
dm.removeDatanode(dnId);
DFSTestUtil.waitForReplication(cluster,b,1,REPLICATION_FACTOR,1);
// Bring a fresh /rack2 node in; the rack policy should be satisfied again.
String rack2[]={"/rack2"};
cluster.startDataNodes(conf,1,true,null,rack2);
cluster.waitActive();
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test processOverReplicatedBlock can handle corrupt replicas fine.
 * It make sure that it won't treat corrupt replicas as valid ones
 * thus prevents NN deleting valid replicas but keeping
 * corrupt ones.
 */
@Test public void testProcesOverReplicateBlock() throws Exception {
Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,Integer.toString(2));
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs=cluster.getFileSystem();
try {
final Path fileName=new Path("/foo1");
DFSTestUtil.createFile(fs,fileName,2,(short)3,0L);
DFSTestUtil.waitReplication(fs,fileName,(short)3);
// Corrupt the replica on datanode 0 and take that node down.
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,fileName);
assertTrue(TestDatanodeBlockScanner.corruptReplica(block,0));
DataNodeProperties dnProps=cluster.stopDataNode(0);
// Delete the previous block-verification log so the restarted datanode
// re-scans its blocks; retry for up to a minute since the scanner may
// still hold the file open.
File scanLog=new File(MiniDFSCluster.getFinalizedDir(cluster.getInstanceStorageDir(0,0),cluster.getNamesystem().getBlockPoolId()).getParent().toString() + "/../dncp_block_verification.log.prev");
for (int i=0; !scanLog.delete(); i++) {
assertTrue("Could not delete log file in one minute",i < 60);
try {
Thread.sleep(1000);
}
catch ( InterruptedException ignored) {
}
}
cluster.restartDataNode(dnProps);
DFSTestUtil.waitReplication(fs,fileName,(short)2);
String blockPoolId=cluster.getNamesystem().getBlockPoolId();
final DatanodeID corruptDataNode=DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(2),blockPoolId);
final FSNamesystem namesystem=cluster.getNamesystem();
final BlockManager bm=namesystem.getBlockManager();
final HeartbeatManager hm=bm.getDatanodeManager().getHeartbeatManager();
try {
namesystem.writeLock();
synchronized (hm) {
// Mark every node EXCEPT the corrupt-replica holder as full, so if
// the NN mistakenly treated the corrupt replica as valid it would
// pick one of the good (full) nodes for deletion.
String corruptMachineName=corruptDataNode.getXferAddr();
for ( DatanodeDescriptor datanode : hm.getDatanodes()) {
if (!corruptMachineName.equals(datanode.getXferAddr())) {
datanode.getStorageInfos()[0].setUtilizationForTesting(100L,100L,0,100L);
datanode.updateHeartbeat(BlockManagerTestUtil.getStorageReportsForDatanode(datanode),0L,0L,0,0);
}
}
// Drop the target replication to 1; exactly one LIVE replica must
// remain (i.e. the corrupt one was not counted as valid).
NameNodeAdapter.setReplication(namesystem,fileName.toString(),(short)1);
assertEquals(1,bm.countNodes(block.getLocalBlock()).liveReplicas());
}
}
finally {
namesystem.writeUnlock();
}
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test over replicated block should get invalidated when decreasing the
 * replication for a partial block.
 */
@Test public void testInvalidateOverReplicatedBlock() throws Exception {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster miniCluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  try {
    final FSNamesystem namesys=miniCluster.getNamesystem();
    final BlockManager blockManager=namesys.getBlockManager();
    FileSystem dfs=miniCluster.getFileSystem();
    Path testPath=new Path(MiniDFSCluster.getBaseDirectory(),"/foo1");
    // Create with replication 2, sync a partial block, then lower the
    // target replication to 1 while the block is still being written.
    FSDataOutputStream ostream=dfs.create(testPath,(short)2);
    ostream.writeBytes("HDFS-3119: " + testPath);
    ostream.hsync();
    dfs.setReplication(testPath,(short)1);
    ostream.close();
    // The surplus replica must have been invalidated, leaving exactly one.
    ExtendedBlock firstBlock=DFSTestUtil.getFirstBlock(dfs,testPath);
    assertEquals("Expected only one live replica for the block",1,blockManager.countNodes(firstBlock.getLocalBlock()).liveReplicas());
  } finally {
    miniCluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * The test verifies that replica for deletion is chosen on a node,
 * with the oldest heartbeat, when this heartbeat is larger than the
 * tolerable heartbeat interval.
 * It creates a file with several blocks and replication 4.
 * The last DN is configured to send heartbeats rarely.
 * Test waits until the tolerable heartbeat interval expires, and reduces
 * replication of the file. All replica deletions should be scheduled for the
 * last node. No replicas will actually be deleted, since last DN doesn't
 * send heartbeats.
 */
@Test public void testChooseReplicaToDelete() throws Exception {
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,SMALL_BLOCK_SIZE);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
fs=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
// Start the 4th datanode with a 300s heartbeat interval so its heartbeat
// quickly becomes the stalest in the cluster.
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,300);
cluster.startDataNodes(conf,1,true,null,null,null);
DataNode lastDN=cluster.getDataNodes().get(3);
DatanodeRegistration dnReg=DataNodeTestUtils.getDNRegistrationForBP(lastDN,namesystem.getBlockPoolId());
String lastDNid=dnReg.getDatanodeUuid();
final Path fileName=new Path("/foo2");
DFSTestUtil.createFile(fs,fileName,SMALL_FILE_LENGTH,(short)4,0L);
DFSTestUtil.waitReplication(fs,fileName,(short)4);
DatanodeDescriptor nodeInfo=null;
long lastHeartbeat=0;
// Tolerable staleness: default heartbeat interval times
// (tolerate multiplier + 1), in milliseconds.
long waitTime=DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000 * (DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT + 1);
// Busy-wait until the last DN's heartbeat is older than the tolerable
// interval, so the NN considers it the best deletion target.
do {
nodeInfo=namesystem.getBlockManager().getDatanodeManager().getDatanode(dnReg);
lastHeartbeat=nodeInfo.getLastUpdate();
}
while (now() - lastHeartbeat < waitTime);
fs.setReplication(fileName,(short)3);
BlockLocation locs[]=fs.getFileBlockLocations(fs.getFileStatus(fileName),0,Long.MAX_VALUE);
// All excess replicas should be scheduled for deletion on the stale node.
// NOTE(review): raw Collection — element type not visible from here.
namesystem.readLock();
Collection dnBlocks=namesystem.getBlockManager().excessReplicateMap.get(lastDNid);
assertEquals("Replicas on node " + lastDNid + " should have been deleted",SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE,dnBlocks.size());
namesystem.readUnlock();
// Nothing is actually deleted yet, since the stale DN sends no heartbeats.
for ( BlockLocation location : locs) assertEquals("Block should still have 4 replicas",4,location.getNames().length);
}
finally {
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test if DatanodeProtocol#blockReceivedAndDeleted can correctly update the
 * pending replications. Also make sure the blockReceivedAndDeleted call is
 * idempotent to the pending replications.
 */
@Test public void testBlockReceived() throws Exception {
  final Configuration conf=new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,1024);
  MiniDFSCluster cluster=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_COUNT).build();
    cluster.waitActive();
    DistributedFileSystem hdfs=cluster.getFileSystem();
    FSNamesystem fsn=cluster.getNamesystem();
    BlockManager blkManager=fsn.getBlockManager();
    final String file="/tmp.txt";
    final Path filePath=new Path(file);
    short replFactor=1;
    DFSTestUtil.createFile(hdfs,filePath,1024L,replFactor,0);
    // Freeze heartbeats so the pending-replication state stays deterministic.
    ArrayList datanodes=cluster.getDataNodes();
    for (int i=0; i < DATANODE_COUNT; i++) {
      DataNodeTestUtils.setHeartbeatsDisabledForTests(datanodes.get(i),true);
    }
    // Raise replication: the block now needs DATANODE_COUNT - 1 new replicas.
    hdfs.setReplication(filePath,(short)DATANODE_COUNT);
    BlockManagerTestUtil.computeAllPendingWork(blkManager);
    assertEquals(1,blkManager.pendingReplications.size());
    INodeFile fileNode=fsn.getFSDirectory().getINode4Write(file).asFile();
    Block[] blocks=fileNode.getBlocks();
    assertEquals(DATANODE_COUNT - 1,blkManager.pendingReplications.getNumReplicas(blocks[0]));
    LocatedBlock locatedBlock=hdfs.getClient().getLocatedBlocks(file,0).get(0);
    DatanodeInfo existingDn=(locatedBlock.getLocations())[0];
    int reportDnNum=0;
    String poolId=cluster.getNamesystem().getBlockPoolId();
    // Report "received" from two datanodes that do not already hold the
    // block; each report should decrement the pending count by one.
    for (int i=0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
      if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
        DatanodeRegistration dnR=datanodes.get(i).getDNRegistrationForBP(poolId);
        StorageReceivedDeletedBlocks[] report={new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",new ReceivedDeletedBlockInfo[]{new ReceivedDeletedBlockInfo(blocks[0],BlockStatus.RECEIVED_BLOCK,"")})};
        cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR,poolId,report);
        reportDnNum++;
      }
    }
    assertEquals(DATANODE_COUNT - 3,blkManager.pendingReplications.getNumReplicas(blocks[0]));
    // BUG FIX: reset the counter before the idempotency round. Previously
    // reportDnNum was still 2 here, so the loop below never executed and
    // the idempotency claim in the javadoc was never actually exercised.
    reportDnNum=0;
    // Re-send the same reports; the pending count must not drop further.
    for (int i=0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
      if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
        DatanodeRegistration dnR=datanodes.get(i).getDNRegistrationForBP(poolId);
        StorageReceivedDeletedBlocks[] report={new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",new ReceivedDeletedBlockInfo[]{new ReceivedDeletedBlockInfo(blocks[0],BlockStatus.RECEIVED_BLOCK,"")})};
        cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR,poolId,report);
        reportDnNum++;
      }
    }
    assertEquals(DATANODE_COUNT - 3,blkManager.pendingReplications.getNumReplicas(blocks[0]));
    // Re-enable heartbeats; the remaining pending work should drain to zero.
    for (int i=0; i < DATANODE_COUNT; i++) {
      DataNodeTestUtils.setHeartbeatsDisabledForTests(datanodes.get(i),false);
      DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
    }
    Thread.sleep(5000);
    assertEquals(0,blkManager.pendingReplications.size());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * Test when a block's replica is removed from RBW folder in one of the
 * datanode, namenode should ask to invalidate that corrupted block and
 * schedule replication for one more replica for that under replicated block.
 */
@Test(timeout=600000) public void testBlockInvalidationWhenRBWReplicaMissedInDN() throws IOException, InterruptedException {
// The test deletes block files directly out of the datanode's RBW directory,
// which is skipped on Windows.
assumeTrue(!Path.WINDOWS);
Configuration conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,2);
// Aggressive block-report/scan/heartbeat intervals so the namenode notices
// the corruption quickly and the polling loops below terminate fast.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,300);
conf.setLong(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,1);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FSDataOutputStream out=null;
try {
final FSNamesystem namesystem=cluster.getNamesystem();
FileSystem fs=cluster.getFileSystem();
// Create and sync (but do not yet close) a file so its replicas are in RBW.
Path testPath=new Path("/tmp/TestRBWBlockInvalidation","foo1");
out=fs.create(testPath,(short)2);
out.writeBytes("HDFS-3157: " + testPath);
out.hsync();
// Add a third datanode so re-replication has somewhere to go later.
cluster.startDataNodes(conf,1,true,null,null,null);
String bpid=namesystem.getBlockPoolId();
ExtendedBlock blk=DFSTestUtil.getFirstBlock(fs,testPath);
Block block=blk.getLocalBlock();
DataNode dn=cluster.getDataNodes().get(0);
// Simulate a lost RBW replica: delete both the block file and its meta file
// from the first datanode's storage.
File blockFile=DataNodeTestUtils.getBlockFile(dn,bpid,block);
File metaFile=DataNodeTestUtils.getMetaFile(dn,bpid,block);
assertTrue("Could not delete the block file from the RBW folder",blockFile.delete());
assertTrue("Could not delete the block meta file from the RBW folder",metaFile.delete());
// Closing finalizes the block on the remaining replica.
out.close();
int liveReplicas=0;
// Poll until the namenode notices the missing replica and the live count
// drops below the replication factor of 2.
while (true) {
if ((liveReplicas=countReplicas(namesystem,blk).liveReplicas()) < 2) {
LOG.info("Live Replicas after corruption: " + liveReplicas);
break;
}
Thread.sleep(100);
}
assertEquals("There should be less than 2 replicas in the " + "liveReplicasMap",1,liveReplicas);
// Poll until re-replication restores a second live replica.
while (true) {
if ((liveReplicas=countReplicas(namesystem,blk).liveReplicas()) > 1) {
LOG.info("Live Replicas after Rereplication: " + liveReplicas);
break;
}
Thread.sleep(100);
}
assertEquals("There should be two live replicas",2,liveReplicas);
// Finally, the corrupt replica itself must be invalidated and drop to zero.
while (true) {
Thread.sleep(100);
if (countReplicas(namesystem,blk).corruptReplicas() == 0) {
LOG.info("Corrupt Replicas becomes 0");
break;
}
}
}
 finally {
if (out != null) {
out.close();
}
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Regression test for HDFS-4799, a case where, upon restart, if there
 * were RWR replicas with out-of-date genstamps, the NN could accidentally
 * delete good replicas instead of the bad replicas.
 */
@Test(timeout=60000) public void testRWRInvalidation() throws Exception {
  Configuration conf = new HdfsConfiguration();
  // RandomDeleterPolicy makes the excess-replica choice nondeterministic,
  // which is what exposed the HDFS-4799 bug.
  conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
      RandomDeleterPolicy.class, BlockPlacementPolicy.class);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  // Generic type arguments added: the raw List declarations made the
  // element reads below (Path / FSDataOutputStream assignments and the
  // enhanced-for loops) fail to compile.
  List<Path> testPaths = Lists.newArrayList();
  for (int i = 0; i < 10; i++) {
    testPaths.add(new Path("/test" + i));
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    List<FSDataOutputStream> streams = Lists.newArrayList();
    try {
      // Write the first ("old gs") generation of data to every file.
      for (Path path : testPaths) {
        FSDataOutputStream out = cluster.getFileSystem().create(path, (short) 2);
        streams.add(out);
        out.writeBytes("old gs data\n");
        out.hflush();
      }
      // Stop one DN so it retains only old-genstamp replicas.
      DataNodeProperties oldGenstampNode = cluster.stopDataNode(0);
      // Advance the genstamp on the surviving DN and shrink replication to 1
      // before closing, so one of the two replicas is now excess.
      for (int i = 0; i < streams.size(); i++) {
        Path path = testPaths.get(i);
        FSDataOutputStream out = streams.get(i);
        out.writeBytes("new gs data\n");
        out.hflush();
        cluster.getFileSystem().setReplication(path, (short) 1);
        out.close();
      }
      LOG.info("=========================== restarting cluster");
      DataNodeProperties otherNode = cluster.stopDataNode(0);
      cluster.restartNameNode();
      // Restart the stale-genstamp node first, then the up-to-date one.
      cluster.restartDataNode(oldGenstampNode);
      cluster.waitActive();
      cluster.restartDataNode(otherNode);
      cluster.waitActive();
      // Force invalidation work; the NN must delete the stale replicas,
      // not the good ones.
      cluster.getNameNode().getNamesystem().getBlockManager().computeInvalidateWork(2);
      cluster.triggerHeartbeats();
      HATestUtil.waitForDNDeletions(cluster);
      cluster.triggerDeletionReports();
      // Every file must still contain both generations of data.
      for (Path path : testPaths) {
        String ret = DFSTestUtil.readFile(cluster.getFileSystem(), path);
        assertEquals("old gs data\n" + "new gs data\n", ret);
      }
    } finally {
      IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
    }
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * In this testcase, client is a node outside of file system.
 * So the 1st replica can be placed on any node.
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 2nd replica,
 * @throws Exception
 */
@Test public void testChooseTarget5() throws Exception {
  // Writer on /d2/r4 -- a rack outside the test cluster's datanodes.
  DatanodeDescriptor writerDesc = DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r4");
  DatanodeStorageInfo[] targets;
  // JUnit's assertEquals takes (expected, actual); arguments were swapped.
  targets = chooseTarget(0, writerDesc);
  assertEquals(0, targets.length);
  targets = chooseTarget(1, writerDesc);
  assertEquals(1, targets.length);
  targets = chooseTarget(2, writerDesc);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3, writerDesc);
  assertEquals(3, targets.length);
  // 2nd and 3rd replicas share a rack that differs from the 1st replica's.
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testChooseTargetWithStaleNodes() throws Exception {
  // Age dataNodes[0]'s last heartbeat past the stale interval, then let the
  // heartbeat monitor re-classify it as stale.
  dataNodes[0].setLastUpdate(Time.now() - staleInterval - 1);
  namenode.getNamesystem().getBlockManager().getDatanodeManager()
      .getHeartbeatManager().heartbeatCheck();
  assertTrue(namenode.getNamesystem().getBlockManager().getDatanodeManager()
      .shouldAvoidStaleDataNodesForWrite());
  // With the first node stale, a single-replica request lands on storages[1].
  DatanodeStorageInfo[] picked = chooseTarget(1);
  assertEquals(picked.length, 1);
  assertEquals(storages[1], picked[0]);
  // Excluding dataNodes[1] as well must still yield one target, and it must
  // not share a rack with the stale dataNodes[0].
  Set excluded = new HashSet();
  excluded.add(dataNodes[1]);
  List alreadyChosen = new ArrayList();
  picked = chooseTarget(1, alreadyChosen, excluded);
  assertEquals(picked.length, 1);
  assertFalse(isOnSameRack(picked[0], dataNodes[0]));
  // Restore dataNodes[0] to fresh so later tests are unaffected.
  dataNodes[0].setLastUpdate(Time.now());
  namenode.getNamesystem().getBlockManager().getDatanodeManager()
      .getHeartbeatManager().heartbeatCheck();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Stale-node avoidance is only honored while fewer than half of the
 * datanodes are stale: 2/6 stale -> avoid, 4/6 stale -> do not avoid,
 * back to 2/6 -> avoid again.
 */
@Test public void testChooseTargetWithMoreThanHalfStaleNodes() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  String[] hosts = new String[] { "host1", "host2", "host3", "host4", "host5", "host6" };
  String[] racks = new String[] { "/d1/r1", "/d1/r1", "/d1/r2", "/d1/r2", "/d2/r3", "/d2/r3" };
  MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(conf).racks(racks)
      .hosts(hosts).numDataNodes(hosts.length).build();
  miniCluster.waitActive();
  try {
    // Hoist the deeply nested manager lookup; every step below used the same
    // four-call chain.
    DatanodeManager dnm = miniCluster.getNameNode().getNamesystem()
        .getBlockManager().getDatanodeManager();
    // Mark the first two datanodes stale.
    for (int i = 0; i < 2; i++) {
      DataNode dn = miniCluster.getDataNodes().get(i);
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
      dnm.getDatanode(dn.getDatanodeId()).setLastUpdate(Time.now() - staleInterval - 1);
    }
    dnm.getHeartbeatManager().heartbeatCheck();
    int numStaleNodes = dnm.getNumStaleNodes();
    assertEquals(2, numStaleNodes);
    // Fewer than half stale: stale nodes are avoided for writes.
    assertTrue(dnm.shouldAvoidStaleDataNodesForWrite());
    DatanodeDescriptor staleNodeInfo =
        dnm.getDatanode(miniCluster.getDataNodes().get(0).getDatanodeId());
    BlockPlacementPolicy replicator = miniCluster.getNameNode().getNamesystem()
        .getBlockManager().getBlockPlacementPolicy();
    DatanodeStorageInfo[] targets = replicator.chooseTarget(filename, 3,
        staleNodeInfo, new ArrayList(), false, null, BLOCK_SIZE, StorageType.DEFAULT);
    assertEquals(3, targets.length);
    assertFalse(isOnSameRack(targets[0], staleNodeInfo));
    // Now make four of the six nodes stale -- more than half.
    for (int i = 0; i < 4; i++) {
      DataNode dn = miniCluster.getDataNodes().get(i);
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
      dnm.getDatanode(dn.getDatanodeId()).setLastUpdate(Time.now() - staleInterval - 1);
    }
    dnm.getHeartbeatManager().heartbeatCheck();
    numStaleNodes = dnm.getNumStaleNodes();
    assertEquals(4, numStaleNodes);
    // More than half stale: avoidance switches off, stale nodes are usable.
    assertFalse(dnm.shouldAvoidStaleDataNodesForWrite());
    targets = replicator.chooseTarget(filename, 3, staleNodeInfo,
        new ArrayList(), false, null, BLOCK_SIZE, StorageType.DEFAULT);
    assertEquals(3, targets.length);
    assertTrue(isOnSameRack(targets[0], staleNodeInfo));
    // Revive two nodes so only two remain stale; avoidance kicks back in.
    for (int i = 2; i < 4; i++) {
      DataNode dn = miniCluster.getDataNodes().get(i);
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
      dnm.getDatanode(dn.getDatanodeId()).setLastUpdate(Time.now());
    }
    dnm.getHeartbeatManager().heartbeatCheck();
    numStaleNodes = dnm.getNumStaleNodes();
    assertEquals(2, numStaleNodes);
    assertTrue(dnm.shouldAvoidStaleDataNodesForWrite());
    targets = chooseTarget(3, staleNodeInfo);
    assertEquals(3, targets.length);
    assertFalse(isOnSameRack(targets[0], staleNodeInfo));
  } finally {
    miniCluster.shutdown();
  }
}
APIUtilityVerifier BooleanVerifier
/**
 * This testcase tests whether an IllegalArgumentException
 * will be thrown when a value greater than 1 is retrieved by
 * DFSUtil#getInvalidateWorkPctPerIteration
 */
@Test public void testGetInvalidateWorkPctPerIteration_GreaterThanOne() {
  final Configuration config = new Configuration();
  // Sanity-check the default first: it must be positive.
  float workPct = DFSUtil.getInvalidateWorkPctPerIteration(config);
  assertTrue(workPct > 0);
  // A configured value above 1.0 must be rejected.
  config.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "1.5f");
  exception.expect(IllegalArgumentException.class);
  workPct = DFSUtil.getInvalidateWorkPctPerIteration(config);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This testcase tests whether the value returned by
 * DFSUtil.getReplWorkMultiplier() is positive,
 * and whether an IllegalArgumentException will be thrown
 * when a non-positive value is retrieved
 */
@Test public void testGetReplWorkMultiplier() {
  Configuration conf = new Configuration();
  // Default multiplier must be positive.
  int blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
  assertTrue(blocksReplWorkMultiplier > 0);
  // A configured positive value is returned verbatim.
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION, "3");
  blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
  // assertEquals(expected, actual) -- expected value first (was swapped).
  assertEquals(3, blocksReplWorkMultiplier);
  // A non-positive value must be rejected.
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION, "-1");
  exception.expect(IllegalArgumentException.class);
  blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This testcase tests re-replication, when dataNodes[0] is already chosen.
 * So the 1st replica can be placed on random rack.
 * the 2nd replica should be placed on different node by same rack as
 * the 1st replica. The 3rd replica can be placed randomly.
 * @throws Exception
 */
@Test public void testRereplicate1() throws Exception {
  List chosenNodes = new ArrayList();
  chosenNodes.add(storages[0]);
  DatanodeStorageInfo[] targets;
  // assertEquals(expected, actual) -- expected count first (was swapped).
  targets = chooseTarget(0, chosenNodes);
  assertEquals(0, targets.length);
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  // 1st new replica shares the existing replica's rack; 2nd goes elsewhere.
  assertTrue(isOnSameRack(targets[0], dataNodes[0]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3, chosenNodes);
  assertEquals(3, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[0]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * In this testcase, client is dataNodes[0], but the dataNodes[1] is
 * not allowed to be chosen. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on a different
 * rack, the 3rd should be on same rack as the 2nd replica, and the rest
 * should be placed on a third rack.
 * @throws Exception
 */
@Test public void testChooseTarget2() throws Exception {
  Set excludedNodes;
  DatanodeStorageInfo[] targets;
  List chosenNodes = new ArrayList();
  // assertEquals(expected, actual) -- expected value first (was swapped).
  excludedNodes = new HashSet();
  excludedNodes.add(dataNodes[1]);
  targets = chooseTarget(0, chosenNodes, excludedNodes);
  assertEquals(0, targets.length);
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  targets = chooseTarget(1, chosenNodes, excludedNodes);
  assertEquals(1, targets.length);
  assertEquals(storages[0], targets[0]);
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  targets = chooseTarget(2, chosenNodes, excludedNodes);
  assertEquals(2, targets.length);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  targets = chooseTarget(3, chosenNodes, excludedNodes);
  assertEquals(3, targets.length);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  assertTrue(isOnSameRack(targets[1], targets[2]));
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  targets = chooseTarget(4, chosenNodes, excludedNodes);
  assertEquals(4, targets.length);
  assertEquals(storages[0], targets[0]);
  for (int i = 1; i < 4; i++) {
    assertFalse(isOnSameRack(targets[0], targets[i]));
  }
  assertTrue(isOnSameRack(targets[1], targets[2]) || isOnSameRack(targets[2], targets[3]));
  assertFalse(isOnSameRack(targets[1], targets[3]));
  // With returnChosenNodes=true, the already-chosen storages[2] must appear
  // in the returned array.
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  chosenNodes.add(storages[2]);
  targets = replicator.chooseTarget(filename, 1, dataNodes[0], chosenNodes, true,
      excludedNodes, BLOCK_SIZE, StorageType.DEFAULT);
  System.out.println("targets=" + Arrays.asList(targets));
  assertEquals(2, targets.length);
  // Replaces the original bare-semicolon scan loop with a readable
  // contains-check over the returned targets.
  boolean containsChosen = false;
  for (DatanodeStorageInfo target : targets) {
    if (storages[2].equals(target)) {
      containsChosen = true;
      break;
    }
  }
  assertTrue(containsChosen);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[2] are already chosen.
 * So the 1st replica should be placed on the rack that the writer resides.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test public void testRereplicate3() throws Exception {
  List chosenNodes = new ArrayList();
  chosenNodes.add(storages[0]);
  chosenNodes.add(storages[2]);
  DatanodeStorageInfo[] targets;
  // assertEquals(expected, actual) -- expected count first (was swapped).
  targets = chooseTarget(0, chosenNodes);
  assertEquals(0, targets.length);
  // Writer defaults to dataNodes[0]: new replica lands on the writer's rack.
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[0]));
  assertFalse(isOnSameRack(targets[0], dataNodes[2]));
  // With dataNodes[2] as writer, the choice flips to its rack instead.
  targets = chooseTarget(1, dataNodes[2], chosenNodes);
  assertEquals(1, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[2]));
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[0]));
  targets = chooseTarget(2, dataNodes[2], chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[2]));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * In this testcase, it tries to choose more targets than available nodes and
 * check the result.
 * @throws Exception
 */
@Test public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
  // Leave the first two datanodes with too little remaining space to be chosen.
  for (int i = 0; i < 2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
  // Capture log output so the placement-policy warning can be verified.
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  DatanodeStorageInfo[] targets = chooseTarget(NUM_OF_DATANODES);
  // Only NUM_OF_DATANODES - 2 nodes are eligible (expected-first assertEquals).
  assertEquals(NUM_OF_DATANODES - 2, targets.length);
  final List log = appender.getLog();
  assertNotNull(log);
  assertFalse(log.isEmpty());
  // Cast added: the raw List's get() returns Object, which did not compile
  // when assigned to LoggingEvent.
  final LoggingEvent lastLogEntry = (LoggingEvent) log.get(log.size() - 1);
  // The final log entry should be (at least) a WARN about the 2 replicas
  // that could not be placed.
  assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
  assertTrue(((String) lastLogEntry.getMessage()).contains("in need of 2"));
  // Restore capacity so later tests see healthy datanodes.
  for (int i = 0; i < 2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This testcase tests whether the default value returned by
 * DFSUtil.getInvalidateWorkPctPerIteration() is positive,
 * and whether an IllegalArgumentException will be thrown
 * when 0.0f is retrieved
 */
@Test public void testGetInvalidateWorkPctPerIteration() {
  Configuration conf = new Configuration();
  // Default must be a positive fraction.
  float blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  assertTrue(blocksInvalidateWorkPct > 0);
  // assertEquals(expected, actual, delta): expected first with a fixed
  // absolute tolerance. The original swapped the arguments and derived the
  // delta from the actual value under test.
  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "0.5f");
  blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  assertEquals(0.5f, blocksInvalidateWorkPct, 1e-7f);
  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "1.0f");
  blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  assertEquals(1.0f, blocksInvalidateWorkPct, 1e-7f);
  // 0.0f is out of range and must be rejected.
  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "0.0f");
  exception.expect(IllegalArgumentException.class);
  blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[1] are already chosen.
 * So the 1st replica should be placed on a different rack than rack 1.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test public void testRereplicate2() throws Exception {
  List chosenNodes = new ArrayList();
  chosenNodes.add(storages[0]);
  chosenNodes.add(storages[1]);
  DatanodeStorageInfo[] targets;
  // assertEquals(expected, actual) -- expected count first (was swapped).
  targets = chooseTarget(0, chosenNodes);
  assertEquals(0, targets.length);
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  // Both existing replicas sit on dataNodes[0]'s rack, so new targets must
  // land elsewhere.
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  assertFalse(isOnSameRack(targets[1], dataNodes[0]));
}
APIUtilityVerifier BooleanVerifier
/**
 * This testcase tests whether an IllegalArgumentException
 * will be thrown when a negative value is retrieved by
 * DFSUtil#getInvalidateWorkPctPerIteration
 */
@Test public void testGetInvalidateWorkPctPerIteration_NegativeValue() {
  final Configuration config = new Configuration();
  // Sanity-check the default first: it must be positive.
  float workPct = DFSUtil.getInvalidateWorkPctPerIteration(config);
  assertTrue(workPct > 0);
  // A negative configured value must be rejected.
  config.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "-0.5f");
  exception.expect(IllegalArgumentException.class);
  workPct = DFSUtil.getInvalidateWorkPctPerIteration(config);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests that chooseTarget with considerLoad set to true correctly calculates
 * load with decommissioned nodes.
 */
@Test public void testChooseTargetWithDecomNodes() throws IOException {
namenode.getNamesystem().writeLock();
try {
// Push heartbeats carrying explicit xceiver counts (2, 4 and 4) for nodes
// 3..5 so the namesystem's in-service load average is known exactly.
String blockPoolId=namenode.getNamesystem().getBlockPoolId();
dnManager.handleHeartbeat(dnrList.get(3),BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[3]),blockPoolId,dataNodes[3].getCacheCapacity(),dataNodes[3].getCacheRemaining(),2,0,0);
dnManager.handleHeartbeat(dnrList.get(4),BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[4]),blockPoolId,dataNodes[4].getCacheCapacity(),dataNodes[4].getCacheRemaining(),4,0,0);
dnManager.handleHeartbeat(dnrList.get(5),BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[5]),blockPoolId,dataNodes[5].getCacheCapacity(),dataNodes[5].getCacheRemaining(),4,0,0);
// Total xceivers reported above; all six nodes are in service at this point.
final int load=2 + 4 + 4;
FSNamesystem fsn=namenode.getNamesystem();
assertEquals((double)load / 6,fsn.getInServiceXceiverAverage(),EPSILON);
// Decommission the first three datanodes; they must drop out of the
// in-service average, which is then computed over the remaining three.
for (int i=0; i < 3; i++) {
DatanodeDescriptor d=dnManager.getDatanode(dnrList.get(i));
dnManager.startDecommission(d);
d.setDecommissioned();
}
assertEquals((double)load / 3,fsn.getInServiceXceiverAverage(),EPSILON);
// All three chosen targets must come from the still-in-service nodes
// (storages[3..]), i.e. decommissioned nodes are never selected.
DatanodeStorageInfo[] targets=namenode.getNamesystem().getBlockManager().getBlockPlacementPolicy().chooseTarget("testFile.txt",3,dataNodes[0],new ArrayList(),false,null,1024,StorageType.DEFAULT);
assertEquals(3,targets.length);
Set targetSet=new HashSet(Arrays.asList(targets));
for (int i=3; i < storages.length; i++) {
assertTrue(targetSet.contains(storages[i]));
}
}
 finally {
// Undo decommissioning and release the namesystem lock regardless of outcome,
// so later tests see an unmodified cluster state.
dataNodes[0].stopDecommission();
dataNodes[1].stopDecommission();
dataNodes[2].stopDecommission();
namenode.getNamesystem().writeUnlock();
}
NameNode.LOG.info("Done working on it");
}
APIUtilityVerifier IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies target choice when datanodes declare mutual dependencies:
 * a chosen node's dependent hosts must be excluded from further placement,
 * so only 2 of the 3 requested targets can be satisfied.
 */
@Test public void testChooseTargetWithDependencies() throws Exception {
  // Start from an empty topology: remove the default datanodes and any
  // leftovers from the "more targets" case.
  for (int i = 0; i < NUM_OF_DATANODES; i++) {
    cluster.remove(dataNodes[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_MORE_TARGETS; i++) {
    DatanodeDescriptor node = dataNodesInMoreTargetsCase[i];
    if (cluster.contains(node)) {
      cluster.remove(node);
    }
  }
  // Register the dependency-case datanodes with topology and host map.
  Host2NodesMap host2DatanodeMap = namenode.getNamesystem().getBlockManager()
      .getDatanodeManager().getHost2DatanodeMap();
  for (int i = 0; i < NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
    cluster.add(dataNodesForDependencies[i]);
    host2DatanodeMap.add(dataNodesForDependencies[i]);
  }
  // Declare mutual dependencies: 1<->2 and 3<->4.
  dataNodesForDependencies[1].addDependentHostName(dataNodesForDependencies[2].getHostName());
  dataNodesForDependencies[2].addDependentHostName(dataNodesForDependencies[1].getHostName());
  dataNodesForDependencies[3].addDependentHostName(dataNodesForDependencies[4].getHostName());
  dataNodesForDependencies[4].addDependentHostName(dataNodesForDependencies[3].getHostName());
  for (int i = 0; i < NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
    updateHeartbeatWithUsage(dataNodesForDependencies[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
  List chosenNodes = new ArrayList();
  DatanodeStorageInfo[] targets;
  Set excludedNodes = new HashSet();
  excludedNodes.add(dataNodesForDependencies[5]);
  // Ask for 3 targets; dependencies leave only 2 valid placements.
  targets = chooseTarget(3, dataNodesForDependencies[1], chosenNodes, excludedNodes);
  // assertEquals(expected, actual) -- expected value first (was swapped).
  assertEquals(2, targets.length);
  assertEquals(storagesForDependencies[1], targets[0]);
  assertTrue(targets[1].equals(storagesForDependencies[3])
      || targets[1].equals(storagesForDependencies[4]));
  // Every node ends up excluded: chosen, pre-excluded, or ruled out by a
  // dependency of a chosen node.
  assertEquals(NUM_OF_DATANODES_FOR_DEPENDENCIES, excludedNodes.size());
  for (int i = 0; i < NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
    assertTrue(excludedNodes.contains(dataNodesForDependencies[i]));
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[3] are already chosen.
 * So the 1st replica should be placed on the rack that the writer resides.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test public void testRereplicate3() throws Exception {
  setupDataNodeCapacity();
  List chosenNodes = new ArrayList();
  chosenNodes.add(storages[0]);
  chosenNodes.add(storages[3]);
  DatanodeStorageInfo[] targets;
  // assertEquals(expected, actual) -- expected count first (was swapped).
  targets = chooseTarget(0, chosenNodes);
  assertEquals(0, targets.length);
  // Default writer (dataNodes[0]): new replica goes to the writer's rack.
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertTrue(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameRack(dataNodes[3], targets[0]));
  // Writer dataNodes[3]: same rack, but never the same node group as an
  // existing replica.
  targets = chooseTarget(1, dataNodes[3], chosenNodes);
  assertEquals(1, targets.length);
  assertTrue(isOnSameRack(dataNodes[3], targets[0]));
  assertFalse(isOnSameNodeGroup(dataNodes[3], targets[0]));
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameNodeGroup(dataNodes[0], targets[0]));
  targets = chooseTarget(2, dataNodes[3], chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(dataNodes[3], targets[0]));
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[1] are already chosen.
 * So the 1st replica should be placed on a different rack of rack 1.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test public void testRereplicate2() throws Exception {
  setupDataNodeCapacity();
  List chosenNodes = new ArrayList();
  chosenNodes.add(storages[0]);
  chosenNodes.add(storages[1]);
  DatanodeStorageInfo[] targets;
  // assertEquals(expected, actual) -- expected count first (was swapped).
  targets = chooseTarget(0, chosenNodes);
  assertEquals(0, targets.length);
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  // Both existing replicas occupy rack 1, so the new target avoids it.
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  // At most one of the two new targets may share rack 1.
  assertFalse(isOnSameRack(dataNodes[0], targets[0]) && isOnSameRack(dataNodes[0], targets[1]));
}
APIUtilityVerifier BooleanVerifier
/**
 * Test re-replication policy in boundary case.
 * Rack 2 has only one node group & the node in this node group is chosen
 * Rack 1 has two nodegroups & one of them is chosen.
 * Replica policy should choose the node from node group of Rack1 but not the
 * same nodegroup with chosen nodes.
 */
@Test public void testRereplicateOnBoundaryTopology() throws Exception {
  // Give every boundary-case datanode ample capacity so only topology
  // constraints drive the choice.
  for (int i = 0; i < NUM_OF_DATANODES_BOUNDARY; i++) {
    updateHeartbeatWithUsage(dataNodesInBoundaryCase[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
  List chosenNodes = new ArrayList();
  chosenNodes.add(storagesInBoundaryCase[0]);
  chosenNodes.add(storagesInBoundaryCase[5]);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(1, dataNodesInBoundaryCase[0], chosenNodes);
  // Guard the indexing below: exactly one additional target was requested
  // (the original dereferenced targets[0] without checking the length).
  assertEquals(1, targets.length);
  // The new replica must avoid the node groups of both chosen replicas.
  assertFalse(isOnSameNodeGroup(dataNodesInBoundaryCase[0], targets[0]));
  assertFalse(isOnSameNodeGroup(dataNodesInBoundaryCase[5], targets[0]));
  assertTrue(checkTargetsOnDifferentNodeGroup(targets));
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This testcase tests re-replication, when dataNodes[0] is already chosen.
 * So the 1st replica can be placed on random rack.
 * the 2nd replica should be placed on different node and nodegroup by same rack as
 * the 1st replica. The 3rd replica can be placed randomly.
 * @throws Exception
 */
@Test public void testRereplicate1() throws Exception {
  setupDataNodeCapacity();
  List chosenNodes = new ArrayList();
  chosenNodes.add(storages[0]);
  DatanodeStorageInfo[] targets;
  // assertEquals(expected, actual) -- expected count first (was swapped).
  targets = chooseTarget(0, chosenNodes);
  assertEquals(0, targets.length);
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3, chosenNodes);
  assertEquals(3, targets.length);
  // Same rack as the existing replica, but never the same node group.
  assertTrue(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameNodeGroup(dataNodes[0], targets[0]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * In this testcase, client is a node outside of file system.
 * So the 1st replica can be placed on any node.
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 2nd replica,
 * @throws Exception
 */
@Test public void testChooseTarget5() throws Exception {
  setupDataNodeCapacity();
  DatanodeStorageInfo[] targets;
  // assertEquals(expected, actual) -- expected count first (was swapped).
  targets = chooseTarget(0, NODE);
  assertEquals(0, targets.length);
  targets = chooseTarget(1, NODE);
  assertEquals(1, targets.length);
  targets = chooseTarget(2, NODE);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3, NODE);
  assertEquals(3, targets.length);
  // 2nd and 3rd replicas share a rack distinct from the 1st replica's, and
  // no two targets may fall into the same node group.
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  verifyNoTwoTargetsOnSameNodeGroup(targets);
}
APIUtilityVerifier NullVerifier
/**
 * Test for a relative path, os independent
 * @throws IOException
 */
@Test public void testRelativePathAsURI() throws IOException {
  // A relative path must still convert to a usable (non-null) URI.
  final URI parsed = Util.stringAsURI(RELATIVE_FILE_PATH);
  LOG.info("Uri: " + parsed);
  assertNotNull(parsed);
}
APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test for an OS dependent absolute paths.
 * @throws IOException
 */
@Test public void testAbsolutePathAsURI() throws IOException {
  // Windows-style absolute path: must parse and carry the file schema.
  final URI windowsUri = Util.stringAsURI(ABSOLUTE_PATH_WINDOWS);
  assertNotNull("Uri should not be null for Windows path" + ABSOLUTE_PATH_WINDOWS, windowsUri);
  assertEquals(URI_FILE_SCHEMA, windowsUri.getScheme());
  // Unix-style absolute path: same expectations.
  final URI unixUri = Util.stringAsURI(ABSOLUTE_PATH_UNIX);
  assertNotNull("Uri should not be null for Unix path" + ABSOLUTE_PATH_UNIX, unixUri);
  assertEquals(URI_FILE_SCHEMA, unixUri.getScheme());
}
APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test for a URI
 * @throws IOException
 */
@Test public void testURI() throws IOException {
  // Unix-form URI: scheme and path round-trip unchanged.
  LOG.info("Testing correct Unix URI: " + URI_UNIX);
  URI parsed = Util.stringAsURI(URI_UNIX);
  LOG.info("Uri: " + parsed);
  assertNotNull("Uri should not be null at this point", parsed);
  assertEquals(URI_FILE_SCHEMA, parsed.getScheme());
  assertEquals(URI_PATH_UNIX, parsed.getPath());
  // Windows-form URI: %20 escapes decode to spaces in getPath().
  LOG.info("Testing correct windows URI: " + URI_WINDOWS);
  parsed = Util.stringAsURI(URI_WINDOWS);
  LOG.info("Uri: " + parsed);
  assertNotNull("Uri should not be null at this point", parsed);
  assertEquals(URI_FILE_SCHEMA, parsed.getScheme());
  assertEquals(URI_PATH_WINDOWS.replace("%20", " "), parsed.getPath());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that JspHelper.getUGI derives the caller identity from a
 * delegation token in the request, and that any username/doAs parameters
 * must be consistent with the token's embedded owner.
 * @throws IOException on an unexpected getUGI failure
 */
@Test public void testGetUgiFromToken() throws IOException {
conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY,"hdfs://localhost:4321/");
ServletContext context=mock(ServletContext.class);
String realUser="TheDoctor";
String user="TheNurse";
conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(conf);
UserGroupInformation ugi;
HttpServletRequest request;
// Token owned by 'user', with 'realUser' as the proxying real user.
Text ownerText=new Text(user);
DelegationTokenIdentifier dtId=new DelegationTokenIdentifier(ownerText,ownerText,new Text(realUser));
Token token=new Token(dtId,new DummySecretManager(0,0,0,0));
String tokenString=token.encodeToUrlString();
// Case 1: token only -- identity comes entirely from the token.
request=getMockRequest(null,null,null);
when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(tokenString);
ugi=JspHelper.getUGI(context,request,conf);
Assert.assertNotNull(ugi.getRealUser());
// JUnit convention: expected value first (args were reversed before).
Assert.assertEquals(realUser,ugi.getRealUser().getShortUserName());
Assert.assertEquals(user,ugi.getShortUserName());
checkUgiFromToken(ugi);
// Case 2: token plus matching filter-authenticated remote user.
request=getMockRequest(realUser,null,null);
when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(tokenString);
ugi=JspHelper.getUGI(context,request,conf);
Assert.assertNotNull(ugi.getRealUser());
Assert.assertEquals(realUser,ugi.getRealUser().getShortUserName());
Assert.assertEquals(user,ugi.getShortUserName());
checkUgiFromToken(ugi);
// Case 3: token plus unrelated remote user -- the token still wins.
request=getMockRequest("rogue",null,null);
when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(tokenString);
ugi=JspHelper.getUGI(context,request,conf);
Assert.assertNotNull(ugi.getRealUser());
Assert.assertEquals(realUser,ugi.getRealUser().getShortUserName());
Assert.assertEquals(user,ugi.getShortUserName());
checkUgiFromToken(ugi);
// Case 4: token plus matching user.name parameter.
request=getMockRequest(null,user,null);
when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(tokenString);
ugi=JspHelper.getUGI(context,request,conf);
Assert.assertNotNull(ugi.getRealUser());
Assert.assertEquals(realUser,ugi.getRealUser().getShortUserName());
Assert.assertEquals(user,ugi.getShortUserName());
checkUgiFromToken(ugi);
// Case 5: a doAs user conflicting with the token owner is rejected.
request=getMockRequest(null,null,"rogue");
when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(tokenString);
try {
JspHelper.getUGI(context,request,conf);
Assert.fail("bad request allowed");
}
catch ( IOException ioe) {
Assert.assertEquals("Usernames not matched: name=rogue != expected=" + user,ioe.getMessage());
}
// Case 6: matching user.name plus conflicting doAs is still rejected.
request=getMockRequest(null,user,"rogue");
when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(tokenString);
try {
JspHelper.getUGI(context,request,conf);
Assert.fail("bad request allowed");
}
catch ( IOException ioe) {
Assert.assertEquals("Usernames not matched: name=rogue != expected=" + user,ioe.getMessage());
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies JspHelper.getUGI with security enabled and no delegation token:
 * the identity must come from the servlet filter's authenticated remote
 * user, and a user.name parameter may only restate that identity.
 * @throws IOException on an unexpected getUGI failure
 */
@Test public void testGetNonProxyUgi() throws IOException {
conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY,"hdfs://localhost:4321/");
ServletContext context=mock(ServletContext.class);
String realUser="TheDoctor";
String user="TheNurse";
conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(conf);
UserGroupInformation ugi;
HttpServletRequest request;
// No authenticated user at all: must be rejected.
request=getMockRequest(null,null,null);
try {
JspHelper.getUGI(context,request,conf);
Assert.fail("bad request allowed");
}
catch ( IOException ioe) {
Assert.assertEquals("Security enabled but user not authenticated by filter",ioe.getMessage());
}
// A user.name parameter without filter authentication is not enough.
request=getMockRequest(null,realUser,null);
try {
JspHelper.getUGI(context,request,conf);
Assert.fail("bad request allowed");
}
catch ( IOException ioe) {
Assert.assertEquals("Security enabled but user not authenticated by filter",ioe.getMessage());
}
// A filter-authenticated remote user alone is accepted.
request=getMockRequest(realUser,null,null);
ugi=JspHelper.getUGI(context,request,conf);
Assert.assertNull(ugi.getRealUser());
// JUnit convention: expected value first (args were reversed before).
Assert.assertEquals(realUser,ugi.getShortUserName());
checkUgiFromAuth(ugi);
// user.name matching the authenticated user is accepted.
request=getMockRequest(realUser,realUser,null);
ugi=JspHelper.getUGI(context,request,conf);
Assert.assertNull(ugi.getRealUser());
Assert.assertEquals(realUser,ugi.getShortUserName());
checkUgiFromAuth(ugi);
// user.name conflicting with the authenticated user is rejected.
request=getMockRequest(realUser,user,null);
try {
JspHelper.getUGI(context,request,conf);
Assert.fail("bad request allowed");
}
catch ( IOException ioe) {
Assert.assertEquals("Usernames not matched: name=" + user + " != expected="+ realUser,ioe.getMessage());
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Round-trips every HdfsServerConstants.ReplicaState through its
 * write()/read() serialization and verifies the identical enum constant
 * is reconstructed.
 */
@Test public void testReadWriteReplicaState(){
try {
DataOutputBuffer out=new DataOutputBuffer();
DataInputBuffer in=new DataInputBuffer();
for ( HdfsServerConstants.ReplicaState repState : HdfsServerConstants.ReplicaState.values()) {
repState.write(out);
in.reset(out.getData(),out.getLength());
HdfsServerConstants.ReplicaState result=HdfsServerConstants.ReplicaState.read(in);
// Reference equality is correct here: enum constants are singletons.
assertTrue("testReadWrite error !!!",repState == result);
out.reset();
in.reset();
}
}
catch ( Exception ex) {
// Include the caught exception so a failure is diagnosable instead of
// silently discarding the stack trace.
fail("testReadWrite ex error ReplicaState: " + ex);
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies JspHelper.getUGI for proxy-user (doAs) requests: the
 * filter-authenticated user must be an authorized proxy for the doAs user,
 * and any user.name parameter must match the authenticated user.
 * @throws IOException on an unexpected getUGI failure
 */
@Test public void testGetProxyUgi() throws IOException {
conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY,"hdfs://localhost:4321/");
ServletContext context=mock(ServletContext.class);
String realUser="TheDoctor";
String user="TheNurse";
conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
// Authorize realUser to impersonate any user from any host.
conf.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(realUser),"*");
conf.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(realUser),"*");
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
UserGroupInformation.setConfiguration(conf);
UserGroupInformation ugi;
HttpServletRequest request;
// doAs without filter authentication must fail.
request=getMockRequest(null,null,user);
try {
JspHelper.getUGI(context,request,conf);
Assert.fail("bad request allowed");
}
catch ( IOException ioe) {
Assert.assertEquals("Security enabled but user not authenticated by filter",ioe.getMessage());
}
// user.name plus doAs without filter authentication must also fail.
request=getMockRequest(null,realUser,user);
try {
JspHelper.getUGI(context,request,conf);
Assert.fail("bad request allowed");
}
catch ( IOException ioe) {
Assert.assertEquals("Security enabled but user not authenticated by filter",ioe.getMessage());
}
// Authenticated proxy user impersonating 'user' is accepted.
request=getMockRequest(realUser,null,user);
ugi=JspHelper.getUGI(context,request,conf);
Assert.assertNotNull(ugi.getRealUser());
// JUnit convention: expected value first (args were reversed before).
Assert.assertEquals(realUser,ugi.getRealUser().getShortUserName());
Assert.assertEquals(user,ugi.getShortUserName());
checkUgiFromAuth(ugi);
// Same, with a matching user.name parameter supplied as well.
request=getMockRequest(realUser,realUser,user);
ugi=JspHelper.getUGI(context,request,conf);
Assert.assertNotNull(ugi.getRealUser());
Assert.assertEquals(realUser,ugi.getRealUser().getShortUserName());
Assert.assertEquals(user,ugi.getShortUserName());
checkUgiFromAuth(ugi);
// user.name conflicting with the authenticated user is rejected.
request=getMockRequest(realUser,user,user);
try {
JspHelper.getUGI(context,request,conf);
Assert.fail("bad request allowed");
}
catch ( IOException ioe) {
Assert.assertEquals("Usernames not matched: name=" + user + " != expected="+ realUser,ioe.getMessage());
}
// 'user' is not configured as a proxy user, so impersonation fails.
try {
request=getMockRequest(user,null,realUser);
JspHelper.getUGI(context,request,conf);
Assert.fail("bad proxy request allowed");
}
catch ( AuthorizationException ae) {
Assert.assertEquals("User: " + user + " is not allowed to impersonate "+ realUser,ae.getMessage());
}
try {
request=getMockRequest(user,user,realUser);
JspHelper.getUGI(context,request,conf);
Assert.fail("bad proxy request allowed");
}
catch ( AuthorizationException ae) {
Assert.assertEquals("User: " + user + " is not allowed to impersonate "+ realUser,ae.getMessage());
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * The test set the configuration parameters for a large block size and
 * restarts initiated single-node cluster.
 * Then it writes a file > block_size and closes it.
 * The second datanode is started in the cluster.
 * As soon as the replication process is started and at least one TEMPORARY
 * replica is found test forces BlockReport process and checks
 * if the TEMPORARY replica isn't reported on it.
 * Eventually, the configuration is being restored into the original state.
 * @throws IOException in case of an error
 */
@Test(timeout=300000) public void blockReport_08() throws IOException {
final String METHOD_NAME=GenericTestUtils.getMethodName();
Path filePath=new Path("/" + METHOD_NAME + ".dat");
final int DN_N1=DN_N0 + 1;
// Block size of 6 checksum chunks forces the 12-chunk file to span
// multiple blocks.
final int bytesChkSum=1024 * 1000;
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,bytesChkSum);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,6 * bytesChkSum);
shutDownCluster();
startUpCluster();
try {
ArrayList blocks=writeFile(METHOD_NAME,12 * bytesChkSum,filePath);
Block bl=findBlock(filePath,12 * bytesChkSum);
BlockChecker bc=new BlockChecker(filePath);
bc.start();
// Wait until replication creates a TEMPORARY replica on the new DN,
// then force a block report from that DN.
waitForTempReplica(bl,DN_N1);
DataNode dn=cluster.getDataNodes().get(DN_N1);
String poolId=cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR=dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports=getBlockReports(dn,poolId,false,false);
sendBlockReports(dnR,poolId,reports);
printStats();
assertEquals("Wrong number of PendingReplication blocks",blocks.size(),cluster.getNamesystem().getPendingReplicationBlocks());
try {
bc.join();
}
catch ( InterruptedException e) {
// Restore the interrupt status instead of silently swallowing it,
// per standard InterruptedException handling practice.
Thread.currentThread().interrupt();
}
}
finally {
resetConfiguration();
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Test write a file, verifies and closes it. Then the length of the blocks
 * are messed up and BlockReport is forced.
 * The modification of blocks' length has to be ignored
 * @throws java.io.IOException on an error
 */
@Test(timeout=300000) public void blockReport_01() throws IOException {
final String METHOD_NAME=GenericTestUtils.getMethodName();
Path filePath=new Path("/" + METHOD_NAME + ".dat");
ArrayList blocks=prepareForRide(filePath,METHOD_NAME,FILE_SIZE);
if (LOG.isDebugEnabled()) {
LOG.debug("Number of blocks allocated " + blocks.size());
}
// Remember each block's true length, then corrupt the in-memory copy
// with a random length smaller than BLOCK_SIZE.
long[] oldLengths=new long[blocks.size()];
int tempLen;
for (int i=0; i < blocks.size(); i++) {
Block b=blocks.get(i);
if (LOG.isDebugEnabled()) {
LOG.debug("Block " + b.getBlockName() + " before\t"+ "Size "+ b.getNumBytes());
}
oldLengths[i]=b.getNumBytes();
if (LOG.isDebugEnabled()) {
LOG.debug("Setting new length");
}
tempLen=rand.nextInt(BLOCK_SIZE);
b.set(b.getBlockId(),tempLen,b.getGenerationStamp());
if (LOG.isDebugEnabled()) {
LOG.debug("Block " + b.getBlockName() + " after\t "+ "Size "+ b.getNumBytes());
}
}
// Force a block report that carries the corrupted lengths.
DataNode dn=cluster.getDataNodes().get(DN_N0);
String poolId=cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR=dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports=getBlockReports(dn,poolId,false,false);
sendBlockReports(dnR,poolId,reports);
// The NameNode must have ignored the bogus lengths: re-read the block
// list and compare against the original lengths.
List blocksAfterReport=DFSTestUtil.getAllBlocks(fs.open(filePath));
if (LOG.isDebugEnabled()) {
LOG.debug("After mods: Number of blocks allocated " + blocksAfterReport.size());
}
for (int i=0; i < blocksAfterReport.size(); i++) {
ExtendedBlock b=blocksAfterReport.get(i).getBlock();
assertEquals("Length of " + i + "th block is incorrect",oldLengths[i],b.getNumBytes());
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test datanode block pool initialization error handling.
 * Failure in initializing a block pool should not cause NPE.
 */
@Test public void testBPInitErrorHandling() throws Exception {
final DataNode mockDn=Mockito.mock(DataNode.class);
Mockito.doReturn(true).when(mockDn).shouldRun();
Configuration conf=new Configuration();
File dnDataDir=new File(new File(TEST_BUILD_DATA,"testBPInitErrorHandling"),"data");
conf.set(DFS_DATANODE_DATA_DIR_KEY,dnDataDir.toURI().toString());
Mockito.doReturn(conf).when(mockDn).getConf();
Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf();
Mockito.doReturn(DataNodeMetrics.create(conf,"fake dn")).when(mockDn).getMetrics();
// The first initBlockPool call fails; every later call succeeds and
// wires the mock dataset into the DataNode.
final AtomicInteger count=new AtomicInteger();
Mockito.doAnswer(new Answer(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
if (count.getAndIncrement() == 0) {
throw new IOException("faked initBlockPool exception");
}
Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
return null;
}
}
).when(mockDn).initBlockPool(Mockito.any(BPOfferService.class));
BPOfferService bpos=setupBPOSForNNs(mockDn,mockNN1,mockNN2);
List actors=bpos.getBPServiceActors();
assertEquals(2,actors.size());
bpos.start();
try {
// Despite the first init failure, the BPOS must still come up and
// deliver block reports (i.e. no NPE on the retry path).
waitForInitialization(bpos);
waitForBlockReport(mockNN1,mockNN2);
}
finally {
bpos.stop();
}
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier NullVerifier HybridVerifier
/**
 * Test that the DataNode determines the active NameNode correctly
 * based on the HA-related information in heartbeat responses.
 * See HDFS-2627.
 */
@Test public void testPickActiveNameNode() throws Exception {
BPOfferService bpos=setupBPOSForNNs(mockNN1,mockNN2);
bpos.start();
try {
waitForInitialization(bpos);
// No NN has claimed active yet.
assertNull(bpos.getActiveNN());
// NN1 reports active (txid 1) -> becomes the active NN.
mockHaStatuses[0]=new NNHAStatusHeartbeat(HAServiceState.ACTIVE,1);
bpos.triggerHeartbeatForTests();
assertSame(mockNN1,bpos.getActiveNN());
// NN2 claims active with a higher txid, so it takes over.
mockHaStatuses[1]=new NNHAStatusHeartbeat(HAServiceState.ACTIVE,2);
bpos.triggerHeartbeatForTests();
assertSame(mockNN2,bpos.getActiveNN());
// A repeated heartbeat keeps the same active NN.
bpos.triggerHeartbeatForTests();
assertSame(mockNN2,bpos.getActiveNN());
// NN2 steps down to standby; no active NN remains.
mockHaStatuses[1]=new NNHAStatusHeartbeat(HAServiceState.STANDBY,2);
bpos.triggerHeartbeatForTests();
assertNull(bpos.getActiveNN());
// NN1 becomes active again with a newer txid.
mockHaStatuses[0]=new NNHAStatusHeartbeat(HAServiceState.ACTIVE,3);
bpos.triggerHeartbeatForTests();
assertSame(mockNN1,bpos.getActiveNN());
}
finally {
bpos.stop();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that the BPOS can register to talk to two different NNs,
 * sends block reports to both, etc.
 */
@Test public void testBasicFunctionality() throws Exception {
BPOfferService bpos=setupBPOSForNNs(mockNN1,mockNN2);
bpos.start();
try {
waitForInitialization(bpos);
// The DN must have registered with both NameNodes.
Mockito.verify(mockNN1).registerDatanode(Mockito.any(DatanodeRegistration.class));
Mockito.verify(mockNN2).registerDatanode(Mockito.any(DatanodeRegistration.class));
waitForBlockReport(mockNN1);
waitForBlockReport(mockNN2);
// A received-block notification must be forwarded to both NNs.
bpos.notifyNamenodeReceivedBlock(FAKE_BLOCK,"","");
ReceivedDeletedBlockInfo[] ret=waitForBlockReceived(FAKE_BLOCK,mockNN1);
assertEquals(1,ret.length);
assertEquals(FAKE_BLOCK.getLocalBlock(),ret[0].getBlock());
ret=waitForBlockReceived(FAKE_BLOCK,mockNN2);
assertEquals(1,ret.length);
assertEquals(FAKE_BLOCK.getLocalBlock(),ret[0].getBlock());
}
finally {
bpos.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end test of the block replacement (move) protocol: an invalid
 * proxy, an already-present destination, a valid move, and a move with an
 * invalid deletion hint.
 * @throws Exception on cluster or RPC failure
 */
@Test public void testBlockReplacement() throws Exception {
final Configuration CONF=new HdfsConfiguration();
final String[] INITIAL_RACKS={"/RACK0","/RACK1","/RACK2"};
final String[] NEW_RACKS={"/RACK2"};
final short REPLICATION_FACTOR=(short)3;
final int DEFAULT_BLOCK_SIZE=1024;
final Random r=new Random();
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DEFAULT_BLOCK_SIZE);
CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,DEFAULT_BLOCK_SIZE / 2);
CONF.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,500);
cluster=new MiniDFSCluster.Builder(CONF).numDataNodes(REPLICATION_FACTOR).racks(INITIAL_RACKS).build();
try {
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
Path fileName=new Path("/tmp.txt");
DFSTestUtil.createFile(fs,fileName,DEFAULT_BLOCK_SIZE,REPLICATION_FACTOR,r.nextLong());
DFSTestUtil.waitReplication(fs,fileName,REPLICATION_FACTOR);
InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(addr,CONF);
List locatedBlocks=client.getNamenode().getBlockLocations("/tmp.txt",0,DEFAULT_BLOCK_SIZE).getLocatedBlocks();
assertEquals(1,locatedBlocks.size());
LocatedBlock block=locatedBlocks.get(0);
DatanodeInfo[] oldNodes=block.getLocations();
// JUnit convention: expected value first (was reversed before).
assertEquals(3,oldNodes.length);
ExtendedBlock b=block.getBlock();
// Start a fourth DN on an existing rack, then find it by elimination.
cluster.startDataNodes(CONF,1,true,null,NEW_RACKS);
cluster.waitActive();
DatanodeInfo[] datanodes=client.datanodeReport(DatanodeReportType.ALL);
DatanodeInfo newNode=null;
for ( DatanodeInfo node : datanodes) {
// primitive flag; the boxed Boolean here served no purpose
boolean isNewNode=true;
for ( DatanodeInfo oldNode : oldNodes) {
if (node.equals(oldNode)) {
isNewNode=false;
break;
}
}
if (isNewNode) {
newNode=node;
break;
}
}
assertTrue(newNode != null);
// Pick the old node sharing the new node's rack as the move source;
// the other two old nodes act as proxies.
DatanodeInfo source=null;
ArrayList proxies=new ArrayList(2);
for ( DatanodeInfo node : datanodes) {
if (node != newNode) {
if (node.getNetworkLocation().equals(newNode.getNetworkLocation())) {
source=node;
}
else {
proxies.add(node);
}
}
}
assertTrue(source != null && proxies.size() == 2);
LOG.info("Testcase 1: Proxy " + newNode + " does not contain the block "+ b);
assertFalse(replaceBlock(b,source,newNode,proxies.get(0)));
LOG.info("Testcase 2: Destination " + proxies.get(1) + " contains the block "+ b);
assertFalse(replaceBlock(b,source,proxies.get(0),proxies.get(1)));
LOG.info("Testcase 3: Source=" + source + " Proxy="+ proxies.get(0)+ " Destination="+ newNode);
assertTrue(replaceBlock(b,source,proxies.get(0),newNode));
checkBlocks(new DatanodeInfo[]{newNode,proxies.get(0),proxies.get(1)},fileName.toString(),DEFAULT_BLOCK_SIZE,REPLICATION_FACTOR,client);
LOG.info("Testcase 4: invalid del hint " + proxies.get(0));
assertTrue(replaceBlock(b,proxies.get(0),proxies.get(1),source));
checkBlocks(proxies.toArray(new DatanodeInfo[proxies.size()]),fileName.toString(),DEFAULT_BLOCK_SIZE,REPLICATION_FACTOR,client);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * Verifies that writing and then reading a file with drop-behind enabled
 * records page-cache drops for the block file in the fadvise tracker.
 */
@Test(timeout=120000) public void testFadviseAfterWriteThenRead() throws Exception {
LOG.info("testFadviseAfterWriteThenRead");
tracker.clear();
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
String TEST_PATH="/test";
int TEST_PATH_LEN=MAX_TEST_FILE_LEN;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
// Write with dropBehind=true, then locate the backing block file.
createHdfsFile(fs,new Path(TEST_PATH),TEST_PATH_LEN,true);
ExtendedBlock block=cluster.getNameNode().getRpcServer().getBlockLocations(TEST_PATH,0,Long.MAX_VALUE).get(0).getBlock();
String fadvisedFileName=MiniDFSCluster.getBlockFile(0,block).getName();
Stats stats=tracker.getStats(fadvisedFileName);
// Assert non-null BEFORE the first dereference; the original only
// checked after stats had already been used, making the check moot.
Assert.assertNotNull(stats);
stats.assertDroppedInRange(0,TEST_PATH_LEN - WRITE_PACKET_SIZE);
stats.clear();
// Read with dropBehind=true and verify drops were recorded again.
readHdfsFile(fs,new Path(TEST_PATH),Long.MAX_VALUE,true);
stats.assertDroppedInRange(0,TEST_PATH_LEN - WRITE_PACKET_SIZE);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier
// Verifies that with drop-behind disabled, no fadvise activity is recorded
// for the block file after either the write or the read.
@Test(timeout=120000) public void testNoFadviseAfterWriteThenRead() throws Exception {
LOG.info("testNoFadviseAfterWriteThenRead");
tracker.clear();
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
String TEST_PATH="/test";
int TEST_PATH_LEN=MAX_TEST_FILE_LEN;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
// Write with dropBehind=false; no fadvise stats should appear.
createHdfsFile(fs,new Path(TEST_PATH),TEST_PATH_LEN,false);
ExtendedBlock block=cluster.getNameNode().getRpcServer().getBlockLocations(TEST_PATH,0,Long.MAX_VALUE).get(0).getBlock();
String fadvisedFileName=MiniDFSCluster.getBlockFile(0,block).getName();
Stats stats=tracker.getStats(fadvisedFileName);
Assert.assertNull(stats);
readHdfsFile(fs,new Path(TEST_PATH),Long.MAX_VALUE,false);
// NOTE(review): this re-asserts the same 'stats' reference fetched
// before the read; presumably tracker.getStats(fadvisedFileName) should
// be queried again here to observe read-side activity -- TODO confirm.
Assert.assertNull(stats);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * Test the scenario where the DataNode defaults to not dropping the cache,
 * but our client defaults are set: client-side drop-behind settings must
 * still produce fadvise drops on the block file.
 */
@Test(timeout=120000) public void testClientDefaults() throws Exception {
LOG.info("testClientDefaults");
tracker.clear();
Configuration conf=new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY,false);
conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY,false);
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS,true);
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES,true);
MiniDFSCluster cluster=null;
String TEST_PATH="/test";
int TEST_PATH_LEN=MAX_TEST_FILE_LEN;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
// dropBehind=null lets the client-side configuration defaults apply.
createHdfsFile(fs,new Path(TEST_PATH),TEST_PATH_LEN,null);
ExtendedBlock block=cluster.getNameNode().getRpcServer().getBlockLocations(TEST_PATH,0,Long.MAX_VALUE).get(0).getBlock();
String fadvisedFileName=MiniDFSCluster.getBlockFile(0,block).getName();
Stats stats=tracker.getStats(fadvisedFileName);
// Assert non-null BEFORE the first dereference; the original only
// checked after stats had already been used, making the check moot.
Assert.assertNotNull(stats);
stats.assertDroppedInRange(0,TEST_PATH_LEN - WRITE_PACKET_SIZE);
stats.clear();
readHdfsFile(fs,new Path(TEST_PATH),Long.MAX_VALUE,null);
stats.assertDroppedInRange(0,TEST_PATH_LEN - WRITE_PACKET_SIZE);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier ConditionMatcher HybridVerifier
/**
 * Parses dfs.datanode.data.dir values with and without [storage-type]
 * prefixes and verifies the resulting storage locations; an unknown
 * storage type must raise IllegalArgumentException.
 */
@Test(timeout=30000) public void testDataDirParsing() throws Throwable {
Configuration conf=new Configuration();
List locations;
File dir0=new File("/dir0");
File dir1=new File("/dir1");
File dir2=new File("/dir2");
File dir3=new File("/dir3");
// Storage-type prefixes are case-insensitive.
String locations1="[disk]/dir0,[DISK]/dir1,[sSd]/dir2,[disK]/dir3";
conf.set(DFS_DATANODE_DATA_DIR_KEY,locations1);
locations=DataNode.getStorageLocations(conf);
assertThat(locations.size(),is(4));
assertThat(locations.get(0).getStorageType(),is(StorageType.DISK));
assertThat(locations.get(0).getUri(),is(dir0.toURI()));
assertThat(locations.get(1).getStorageType(),is(StorageType.DISK));
assertThat(locations.get(1).getUri(),is(dir1.toURI()));
assertThat(locations.get(2).getStorageType(),is(StorageType.SSD));
assertThat(locations.get(2).getUri(),is(dir2.toURI()));
assertThat(locations.get(3).getStorageType(),is(StorageType.DISK));
assertThat(locations.get(3).getUri(),is(dir3.toURI()));
String locations2="[BadMediaType]/dir0,[ssd]/dir1,[disk]/dir2";
conf.set(DFS_DATANODE_DATA_DIR_KEY,locations2);
try {
locations=DataNode.getStorageLocations(conf);
// The original bare fail() gave no hint of what was expected.
fail("Expected IllegalArgumentException for unknown storage type in " + locations2);
}
catch ( IllegalArgumentException iae) {
DataNode.LOG.info("The exception is expected.",iae);
}
// Entries without a prefix default to DISK storage.
String locations3="/dir0,/dir1";
conf.set(DFS_DATANODE_DATA_DIR_KEY,locations3);
locations=DataNode.getStorageLocations(conf);
assertThat(locations.size(),is(2));
assertThat(locations.get(0).getStorageType(),is(StorageType.DISK));
assertThat(locations.get(0).getUri(),is(dir0.toURI()));
assertThat(locations.get(1).getStorageType(),is(StorageType.DISK));
assertThat(locations.get(1).getUri(),is(dir1.toURI()));
}
APIUtilityVerifier EqualityVerifier ConditionMatcher HybridVerifier
/**
 * Runs DataNode.checkStorageLocations against three candidate directories
 * where the disk checker rejects the first two, and verifies that only the
 * third survives validation.
 */
@Test(timeout=30000) public void testDataDirValidation() throws Throwable {
DataNodeDiskChecker diskChecker=mock(DataNodeDiskChecker.class);
// Fail the first two checkDir calls, succeed on the third.
doThrow(new IOException()).doThrow(new IOException()).doNothing().when(diskChecker).checkDir(any(LocalFileSystem.class),any(Path.class));
LocalFileSystem fs=mock(LocalFileSystem.class);
AbstractList candidates=new ArrayList();
candidates.add(StorageLocation.parse("file:/p1/"));
candidates.add(StorageLocation.parse("file:/p2/"));
candidates.add(StorageLocation.parse("file:/p3/"));
List survivors=DataNode.checkStorageLocations(candidates,fs,diskChecker);
assertEquals("number of valid data dirs",1,survivors.size());
String validDir=survivors.iterator().next().getFile().getPath();
assertThat("p3 should be valid",new File("/p3/").getPath(),is(validDir));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies that the DataNodeInfo MXBean exposes the same values as the
// DataNode's direct accessors.
@Test public void testDataNodeMXBean() throws Exception {
Configuration conf=new Configuration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
try {
List datanodes=cluster.getDataNodes();
Assert.assertEquals(datanodes.size(),1);
DataNode datanode=datanodes.get(0);
// Query the platform MBean server for the DataNode's registered bean
// and compare each attribute against the live object.
MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName=new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
String clusterId=(String)mbs.getAttribute(mxbeanName,"ClusterId");
Assert.assertEquals(datanode.getClusterId(),clusterId);
String version=(String)mbs.getAttribute(mxbeanName,"Version");
Assert.assertEquals(datanode.getVersion(),version);
String rpcPort=(String)mbs.getAttribute(mxbeanName,"RpcPort");
Assert.assertEquals(datanode.getRpcPort(),rpcPort);
String httpPort=(String)mbs.getAttribute(mxbeanName,"HttpPort");
Assert.assertEquals(datanode.getHttpPort(),httpPort);
String namenodeAddresses=(String)mbs.getAttribute(mxbeanName,"NamenodeAddresses");
Assert.assertEquals(datanode.getNamenodeAddresses(),namenodeAddresses);
// Volume info embeds volatile numbers; normalize digits before comparing.
String volumeInfo=(String)mbs.getAttribute(mxbeanName,"VolumeInfo");
Assert.assertEquals(replaceDigits(datanode.getVolumeInfo()),replaceDigits(volumeInfo));
int xceiverCount=(Integer)mbs.getAttribute(mxbeanName,"XceiverCount");
Assert.assertEquals(datanode.getXceiverCount(),xceiverCount);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * starts single nn and single dn and verifies registration and handshake
 * @throws IOException
 */
@Test public void testFedSingleNN() throws IOException {
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nameNodePort(9927).build();
try {
NameNode nn1=cluster.getNameNode();
assertNotNull("cannot create nn1",nn1);
String bpid1=FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
String cid1=FSImageTestUtil.getFSImage(nn1).getClusterID();
int lv1=FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
LOG.info("nn1: lv=" + lv1 + ";cid="+ cid1+ ";bpid="+ bpid1+ ";uri="+ nn1.getNameNodeAddress());
DataNode dn=cluster.getDataNodes().get(0);
final Map volInfos=dn.data.getVolumeInfoMap();
Assert.assertTrue("No volumes in the fsdataset",volInfos.size() > 0);
int i=0;
for ( Map.Entry e : volInfos.entrySet()) {
LOG.info("vol " + i++ + ") "+ e.getKey()+ ": "+ e.getValue());
}
assertEquals("number of volumes is wrong",2,volInfos.size());
for ( BPOfferService bpos : dn.getAllBpOs()) {
// The original log concatenated "reg: bpid=" directly with "; name=",
// dropping the block pool id; log the actual value.
LOG.info("reg: bpid=" + bpos.getBlockPoolId() + "; name="+ bpos.bpRegistration+ "; sid="+ bpos.bpRegistration.getDatanodeUuid()+ "; nna="+ getNNSocketAddress(bpos));
}
BPOfferService bpos1=dn.getAllBpOs()[0];
bpos1.triggerBlockReportForTests();
// JUnit convention: expected value first (args were reversed before).
assertEquals("wrong nn address",nn1.getNameNodeAddress(),getNNSocketAddress(bpos1));
assertEquals("wrong bpid",bpid1,bpos1.getBlockPoolId());
assertEquals("wrong cid",cid1,dn.getClusterId());
cluster.shutdown();
// After shutdown no block pools should remain registered on the DN.
assertEquals(0,dn.getAllBpOs().length);
cluster=null;
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * start multiple NNs and single DN and verifies per BP registrations and
 * handshakes.
 * @throws IOException
 */
@Test public void test2NNRegistration() throws IOException {
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).build();
try {
cluster.waitActive();
NameNode nn1=cluster.getNameNode(0);
NameNode nn2=cluster.getNameNode(1);
assertNotNull("cannot create nn1",nn1);
assertNotNull("cannot create nn2",nn2);
String bpid1=FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
String bpid2=FSImageTestUtil.getFSImage(nn2).getBlockPoolID();
String cid1=FSImageTestUtil.getFSImage(nn1).getClusterID();
String cid2=FSImageTestUtil.getFSImage(nn2).getClusterID();
int lv1=FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
int lv2=FSImageTestUtil.getFSImage(nn2).getLayoutVersion();
int ns1=FSImageTestUtil.getFSImage(nn1).getNamespaceID();
int ns2=FSImageTestUtil.getFSImage(nn2).getNamespaceID();
// Federated NNs share a cluster id but must have distinct namespaces.
assertNotSame("namespace ids should be different",ns1,ns2);
LOG.info("nn1: lv=" + lv1 + ";cid="+ cid1+ ";bpid="+ bpid1+ ";uri="+ nn1.getNameNodeAddress());
LOG.info("nn2: lv=" + lv2 + ";cid="+ cid2+ ";bpid="+ bpid2+ ";uri="+ nn2.getNameNodeAddress());
DataNode dn=cluster.getDataNodes().get(0);
final Map volInfos=dn.data.getVolumeInfoMap();
Assert.assertTrue("No volumes in the fsdataset",volInfos.size() > 0);
int i=0;
for ( Map.Entry e : volInfos.entrySet()) {
LOG.info("vol " + i++ + ") "+ e.getKey()+ ": "+ e.getValue());
}
assertEquals("number of volumes is wrong",2,volInfos.size());
for ( BPOfferService bpos : dn.getAllBpOs()) {
LOG.info("BP: " + bpos);
}
BPOfferService bpos1=dn.getAllBpOs()[0];
BPOfferService bpos2=dn.getAllBpOs()[1];
// BP ordering on the DN is not deterministic; swap so bpos1 pairs
// with nn1 before asserting.
if (getNNSocketAddress(bpos1).equals(nn2.getNameNodeAddress())) {
BPOfferService tmp=bpos1;
bpos1=bpos2;
bpos2=tmp;
}
assertEquals("wrong nn address",getNNSocketAddress(bpos1),nn1.getNameNodeAddress());
assertEquals("wrong nn address",getNNSocketAddress(bpos2),nn2.getNameNodeAddress());
assertEquals("wrong bpid",bpos1.getBlockPoolId(),bpid1);
assertEquals("wrong bpid",bpos2.getBlockPoolId(),bpid2);
assertEquals("wrong cid",dn.getClusterId(),cid1);
assertEquals("cid should be same",cid2,cid1);
assertEquals("namespace should be same",bpos1.bpNSInfo.namespaceID,ns1);
assertEquals("namespace should be same",bpos2.bpNSInfo.namespaceID,ns2);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier ConditionMatcher
/**
 * Tests that rolling back a rolling upgrade restores block files that were
 * moved to trash on deletion, so a previously deleted file becomes
 * readable again with its original contents.
 */
@Test(timeout=600000) public void testDatanodeRollingUpgradeWithRollback() throws Exception {
try {
startCluster();
Path testFile1=new Path("/TestDataNodeRollingUpgrade1.dat");
DFSTestUtil.createFile(fs,testFile1,FILE_SIZE,REPL_FACTOR,SEED);
String fileContents1=DFSTestUtil.readFile(fs,testFile1);
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
startRollingUpgrade();
File blockFile=getBlockForFile(testFile1,true);
File trashFile=getTrashFileForBlock(blockFile,false);
deleteAndEnsureInTrash(testFile1,blockFile,trashFile);
rollbackRollingUpgrade();
ensureTrashRestored(blockFile,trashFile);
// Use a real assertion: the Java 'assert' keyword is a no-op unless
// the JVM runs with -ea, so the original check could silently pass.
assertThat(fs.exists(testFile1),is(true));
String fileContents2=DFSTestUtil.readFile(fs,testFile1);
assertThat(fileContents1,is(fileContents2));
}
finally {
shutdownCluster();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier
/**
 * Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
 * option, ie the DN tolerates a failed-to-use scenario during
 * its start-up.
 */
@Test public void testValidVolumesAtStartup() throws Exception {
// Volume failure is simulated via directory permissions, which does not
// work on Windows.
assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
cluster.shutdownDataNodes();
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,1);
// Two data dirs: dataDir1 is usable, dataDir2 is prepared to fail.
File tld=new File(MiniDFSCluster.getBaseDirectory(),"badData");
File dataDir1=new File(tld,"data1");
File dataDir1Actual=new File(dataDir1,"1");
dataDir1Actual.mkdirs();
File dataDir2=new File(tld,"data2");
prepareDirToFail(dataDir2);
File dataDir2Actual=new File(dataDir2,"2");
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,dataDir1Actual.getPath() + "," + dataDir2Actual.getPath());
cluster.startDataNodes(conf,1,false,null,null);
cluster.waitActive();
try {
// With one failed volume tolerated, the DN must come up using only
// the good directory.
assertTrue("The DN should have started up fine.",cluster.isDataNodeUp());
DataNode dn=cluster.getDataNodes().get(0);
String si=DataNodeTestUtils.getFSDataset(dn).getStorageInfo();
assertTrue("The DN should have started with this directory",si.contains(dataDir1Actual.getPath()));
assertFalse("The DN shouldn't have a bad directory.",si.contains(dataDir2Actual.getPath()));
}
finally {
cluster.shutdownDataNodes();
// Restore permissions so later tests and cleanup can delete the dir.
FileUtil.chmod(dataDir2.toString(),"755");
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test to check that a DN goes down when all its volumes have failed:
 * both rbw directories are made read-only and writes are issued until the
 * DataNode reports itself down.
 */
@Test public void testShutdown() throws Exception {
// The read-only-volume trick does not work on Windows; skip there.
if (System.getProperty("os.name").startsWith("Windows")) {
return;
}
cluster.startDataNodes(conf,2,true,null,null);
cluster.waitActive();
final int dnIndex=0;
String bpid=cluster.getNamesystem().getBlockPoolId();
File rbwDir1=MiniDFSCluster.getRbwDir(cluster.getInstanceStorageDir(dnIndex,0),bpid);
File rbwDir2=MiniDFSCluster.getRbwDir(cluster.getInstanceStorageDir(dnIndex,1),bpid);
try {
assertTrue("Couldn't chmod local vol",rbwDir1.setReadOnly());
assertTrue("Couldn't chmod local vol",rbwDir2.setReadOnly());
DataNode dn=cluster.getDataNodes().get(dnIndex);
// Keep writing (and deleting) files until the DN notices the failed
// volumes and shuts itself down.
int attempt=0;
while (dn.isDatanodeUp()) {
Path fileName=new Path("/test.txt" + attempt);
DFSTestUtil.createFile(fs,fileName,1024,(short)2,1L);
DFSTestUtil.waitReplication(fs,fileName,(short)2);
fs.delete(fileName,true);
attempt++;
}
}
finally {
// Make the directories writable again so cleanup can proceed.
FileUtil.setWritable(rbwDir1,true);
FileUtil.setWritable(rbwDir2,true);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Check that the permissions of the local DN directories are as expected.
 */
@Test public void testLocalDirs() throws Exception {
  Configuration conf = new Configuration();
  // The expected permission comes straight from the configuration default.
  final String permSetting = conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY);
  final FsPermission wanted = new FsPermission(permSetting);
  final FileSystem localFs = FileSystem.getLocal(conf);
  // Every volume of every datanode must carry the configured permission.
  for (DataNode node : cluster.getDataNodes()) {
    for (FsVolumeSpi volume : node.getFSDataset().getVolumes()) {
      Path volumePath = new Path(volume.getBasePath());
      FsPermission found = localFs.getFileStatus(volumePath).getPermission();
      assertEquals("Permission for dir: " + volumePath + ", is " + found
          + ", while expected is " + wanted, wanted, found);
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * Verify that the NameNode can learn about new storages from incremental
 * block reports.
 * This tests the fix for the error condition seen in HDFS-6904.
 * @throws IOException
 * @throws InterruptedException
 */
@Test(timeout=60000) public void testNnLearnsNewStorages() throws IOException, InterruptedException {
  // Report a block against a storage the NameNode has never heard of.
  final String unknownStorageId = UUID.randomUUID().toString();
  final DatanodeStorage unknownStorage = new DatanodeStorage(unknownStorageId);
  final StorageReceivedDeletedBlocks[] reports =
      makeReportForReceivedBlock(getDummyBlock(), unknownStorage);
  cluster.getNameNodeRpc().blockReceivedAndDeleted(dn0Reg, poolId, reports);
  // The NameNode must now be tracking that storage for dn0.
  DatanodeStorageInfo storageInfo = cluster.getNameNode()
      .getNamesystem()
      .getBlockManager()
      .getDatanodeManager()
      .getDatanode(dn0.getDatanodeId())
      .getStorageInfo(unknownStorageId);
  assertNotNull(storageInfo);
}
APIUtilityVerifier TestInitializer InternalCallVerifier ConditionMatcher HybridVerifier
/**
 * Setup a {@link MiniDFSCluster}.
 * Create a block with both {@link State#NORMAL} and {@link State#READ_ONLY_SHARED} replicas.
 */
@Before public void setup() throws IOException, InterruptedException {
  conf = new HdfsConfiguration();
  SimulatedFSDataset.setFactory(conf);
  // Configure one datanode (RO_NODE_INDEX) to expose READ_ONLY_SHARED storage.
  Configuration[] overlays = new Configuration[NUM_DATANODES];
  for (int i = 0; i < overlays.length; i++) {
    overlays[i] = new Configuration();
    if (i == RO_NODE_INDEX) {
      // Inside this branch i == RO_NODE_INDEX always holds, so set the state
      // directly (the original ternary here was dead code).
      overlays[i].setEnum(SimulatedFSDataset.CONFIG_PROPERTY_STATE, READ_ONLY_SHARED);
    }
  }
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(NUM_DATANODES)
      .dataNodeConfOverlays(overlays)
      .build();
  fs = cluster.getFileSystem();
  blockManager = cluster.getNameNode().getNamesystem().getBlockManager();
  datanodeManager = blockManager.getDatanodeManager();
  client = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()),
      cluster.getConfiguration(0));
  // Sanity-check that each DN reports the storage state we configured.
  for (int i = 0; i < NUM_DATANODES; i++) {
    DataNode dataNode = cluster.getDataNodes().get(i);
    validateStorageState(
        BlockManagerTestUtil.getStorageReportsForDatanode(
            datanodeManager.getDatanode(dataNode.getDatanodeId())),
        i == RO_NODE_INDEX ? READ_ONLY_SHARED : NORMAL);
  }
  // Create a single-block file with one NORMAL replica...
  DFSTestUtil.createFile(fs, PATH, BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE, (short) 1, seed);
  LocatedBlock locatedBlock = getLocatedBlock();
  extendedBlock = locatedBlock.getBlock();
  block = extendedBlock.getLocalBlock();
  assertThat(locatedBlock.getLocations().length, is(1));
  normalDataNode = locatedBlock.getLocations()[0];
  readOnlyDataNode = datanodeManager.getDatanode(
      cluster.getDataNodes().get(RO_NODE_INDEX).getDatanodeId());
  assertThat(normalDataNode, is(not(readOnlyDataNode)));
  validateNumberReplicas(1);
  // ...then inject a READ_ONLY_SHARED replica of the same block.
  cluster.injectBlocks(0, RO_NODE_INDEX, Collections.singleton(block));
  waitForLocations(2);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Invalidating blocks must update usage accounting and leave the rest valid. */
@Test public void testInvalidate() throws IOException {
  final SimulatedFSDataset dataset = getSimulatedFSDataset();
  int totalBytes = addSomeBlocks(dataset);
  // Invalidate blocks 1 and 2.
  final Block[] doomed = {new Block(1, 0, 0), new Block(2, 0, 0)};
  dataset.invalidate(bpid, doomed);
  checkInvalidBlock(new ExtendedBlock(bpid, doomed[0]));
  checkInvalidBlock(new ExtendedBlock(bpid, doomed[1]));
  // Accounting must reflect the removed bytes.
  long removedBytes = blockIdToLen(1) + blockIdToLen(2);
  assertEquals(totalBytes - removedBytes, dataset.getDfsUsed());
  assertEquals(dataset.getCapacity() - totalBytes + removedBytes, dataset.getRemaining());
  // All the remaining blocks stay valid.
  for (int id = 3; id <= NUMBLOCKS; ++id) {
    assertTrue(dataset.isValidBlock(new ExtendedBlock(bpid, new Block(id, 0, 0))));
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Metadata stream for a missing block must throw; for an existing block it
 *  must carry the expected header version and a NULL checksum. */
@Test public void testGetMetaData() throws IOException {
  final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
  ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0);
  try {
    // No blocks exist yet, so this call must throw.
    fsdataset.getMetaDataInputStream(b);
    // Fixed: fail() instead of the assertTrue(msg, false) anti-pattern.
    fail("Expected an IO exception");
  } catch (IOException e) {
    // expected: block does not exist
  }
  addSomeBlocks(fsdataset);
  b = new ExtendedBlock(bpid, 1, 0, 0);
  InputStream metaInput = fsdataset.getMetaDataInputStream(b);
  DataInputStream metaDataInput = new DataInputStream(metaInput);
  try {
    short version = metaDataInput.readShort();
    assertEquals(BlockMetadataHeader.VERSION, version);
    DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
    assertEquals(DataChecksum.Type.NULL, checksum.getChecksumType());
    assertEquals(0, checksum.getChecksumSize());
  } finally {
    // Fixed: don't leak the stream if an assertion fails.
    metaDataInput.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/** Inject a block report into a non-empty dataset: both the pre-existing and
 *  the injected blocks must show up, and capacity limits must be enforced. */
@Test public void testInjectionNonEmpty() throws IOException {
  SimulatedFSDataset fsdataset = getSimulatedFSDataset();
  BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
  assertEquals(0, blockReport.getNumberOfBlocks());
  int bytesAdded = addSomeBlocks(fsdataset);
  blockReport = fsdataset.getBlockReport(bpid);
  assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
  for (Block b : blockReport) {
    assertNotNull(b);
    assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
  }
  fsdataset = null;
  // Inject the report above into a second, already-populated dataset.
  SimulatedFSDataset sfsdataset = getSimulatedFSDataset();
  // Add blocks whose ids do not collide with the ones being injected.
  bytesAdded += addSomeBlocks(sfsdataset, NUMBLOCKS + 1);
  // Fixed: assert on the new dataset's report. The original discarded the
  // getBlockReport() return value and re-asserted on the stale report, which
  // made both assertions trivially true.
  BlockListAsLongs newReport = sfsdataset.getBlockReport(bpid);
  assertEquals(NUMBLOCKS, newReport.getNumberOfBlocks());
  newReport = sfsdataset.getBlockReport(bpid);
  assertEquals(NUMBLOCKS, newReport.getNumberOfBlocks());
  sfsdataset.injectBlocks(bpid, blockReport);
  blockReport = sfsdataset.getBlockReport(bpid);
  assertEquals(NUMBLOCKS * 2, blockReport.getNumberOfBlocks());
  for (Block b : blockReport) {
    assertNotNull(b);
    assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
    assertEquals(blockIdToLen(b.getBlockId()),
        sfsdataset.getLength(new ExtendedBlock(bpid, b)));
  }
  assertEquals(bytesAdded, sfsdataset.getDfsUsed());
  assertEquals(sfsdataset.getCapacity() - bytesAdded, sfsdataset.getRemaining());
  // Now force an injection failure: capacity too small for the blocks.
  conf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY, 10);
  try {
    sfsdataset = getSimulatedFSDataset();
    sfsdataset.addBlockPool(bpid, conf);
    sfsdataset.injectBlocks(bpid, blockReport);
    // Fixed: fail() instead of assertTrue(msg, false).
    fail("Expected an IO exception");
  } catch (IOException e) {
    // expected: insufficient capacity
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/** Inject a block report into an empty dataset and verify the blocks,
 *  their lengths, and the usage accounting all carry over. */
@Test public void testInjectionEmpty() throws IOException {
  SimulatedFSDataset source = getSimulatedFSDataset();
  BlockListAsLongs report = source.getBlockReport(bpid);
  assertEquals(0, report.getNumberOfBlocks());
  int totalBytes = addSomeBlocks(source);
  report = source.getBlockReport(bpid);
  assertEquals(NUMBLOCKS, report.getNumberOfBlocks());
  for (Block b : report) {
    assertNotNull(b);
    assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
  }
  // Inject the report into a fresh, empty dataset and re-verify everything.
  SimulatedFSDataset target = getSimulatedFSDataset();
  target.injectBlocks(bpid, report);
  report = target.getBlockReport(bpid);
  assertEquals(NUMBLOCKS, report.getNumberOfBlocks());
  for (Block b : report) {
    assertNotNull(b);
    assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
    assertEquals(blockIdToLen(b.getBlockId()),
        target.getLength(new ExtendedBlock(bpid, b)));
  }
  assertEquals(totalBytes, target.getDfsUsed());
  assertEquals(target.getCapacity() - totalBytes, target.getRemaining());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** Usage accounting: empty dataset uses nothing; adding blocks moves bytes
 *  from remaining to used. */
@Test public void testStorageUsage() throws IOException {
  final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
  // Fixed: JUnit's assertEquals takes (expected, actual) — the original had
  // the arguments reversed, which garbles failure messages.
  assertEquals(0, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity(), fsdataset.getRemaining());
  int bytesAdded = addSomeBlocks(fsdataset);
  assertEquals(bytesAdded, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity() - bytesAdded, fsdataset.getRemaining());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testTransferRbw() throws Exception {
// Verify that a replica-being-written (RBW) can be transferred to a newly
// added datanode, preserving block id, generation stamp and visible length.
final HdfsConfiguration conf=new HdfsConfiguration();
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
try {
cluster.waitActive();
final DistributedFileSystem fs=cluster.getFileSystem();
final Path p=new Path("/foo");
// Write a random amount of data (64KB..128KB) without closing the stream,
// so the last block stays in the RBW state.
final int size=(1 << 16) + RAN.nextInt(1 << 16);
LOG.info("size = " + size);
final FSDataOutputStream out=fs.create(p,REPLICATION);
final byte[] bytes=new byte[1024];
for (int remaining=size; remaining > 0; ) {
RAN.nextBytes(bytes);
final int len=bytes.length < remaining ? bytes.length : remaining;
out.write(bytes,0,len);
out.hflush();
remaining-=len;
}
final ReplicaBeingWritten oldrbw;
final DataNode newnode;
final DatanodeInfo newnodeinfo;
final String bpid=cluster.getNamesystem().getBlockPoolId();
{
// Grab the RBW replica from the original datanode, then bring up a fresh
// datanode to receive the transfer.
final DataNode oldnode=cluster.getDataNodes().get(0);
oldrbw=getRbw(oldnode,bpid);
LOG.info("oldrbw = " + oldrbw);
cluster.startDataNodes(conf,1,true,null,null);
newnode=cluster.getDataNodes().get(REPLICATION);
final DatanodeInfo oldnodeinfo;
{
// Find which report entry is the new node; the other one is the old node.
final DatanodeInfo[] datatnodeinfos=cluster.getNameNodeRpc().getDatanodeReport(DatanodeReportType.LIVE);
Assert.assertEquals(2,datatnodeinfos.length);
int i=0;
for (DatanodeRegistration dnReg=newnode.getDNRegistrationForBP(bpid); i < datatnodeinfos.length && !datatnodeinfos[i].equals(dnReg); i++) ;
Assert.assertTrue(i < datatnodeinfos.length);
newnodeinfo=datatnodeinfos[i];
oldnodeinfo=datatnodeinfos[1 - i];
}
// Transfer the RBW; only the acked portion (getBytesAcked) is sent.
final ExtendedBlock b=new ExtendedBlock(bpid,oldrbw.getBlockId(),oldrbw.getBytesAcked(),oldrbw.getGenerationStamp());
final BlockOpResponseProto s=DFSTestUtil.transferRbw(b,DFSClientAdapter.getDFSClient(fs),oldnodeinfo,newnodeinfo);
Assert.assertEquals(Status.SUCCESS,s.getStatus());
}
// The new node's replica must match the old RBW's identity and length.
final ReplicaBeingWritten newrbw=getRbw(newnode,bpid);
LOG.info("newrbw = " + newrbw);
Assert.assertEquals(oldrbw.getBlockId(),newrbw.getBlockId());
Assert.assertEquals(oldrbw.getGenerationStamp(),newrbw.getGenerationStamp());
Assert.assertEquals(oldrbw.getVisibleLength(),newrbw.getVisibleLength());
LOG.info("DONE");
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/** After leaving stale unlink tmp files around and restarting the datanodes,
 *  every replica must come back in the FINALIZED state. */
@Test public void testRecoverReplicas() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();
  try {
    FileSystem fs = cluster.getFileSystem();
    // Create a few small files so the DN holds several replicas.
    for (int i = 0; i < 4; i++) {
      Path fileName = new Path("/test" + i);
      DFSTestUtil.createFile(fs, fileName, 1, (short) 1, 0L);
      DFSTestUtil.waitReplication(fs, fileName, (short) 1);
    }
    String bpid = cluster.getNamesystem().getBlockPoolId();
    DataNode dn = cluster.getDataNodes().get(0);
    // Fixed: use parameterized Iterator/Collection instead of raw types
    // (the raw forms would not even compile against ReplicaInfo locals).
    Iterator<ReplicaInfo> replicasItor = dataset(dn).volumeMap.replicas(bpid).iterator();
    ReplicaInfo replica = replicasItor.next();
    // Scatter unlink tmp files in different combinations across replicas.
    createUnlinkTmpFile(replica, true, true);
    createUnlinkTmpFile(replica, false, true);
    replica = replicasItor.next();
    createUnlinkTmpFile(replica, true, false);
    createUnlinkTmpFile(replica, false, false);
    replica = replicasItor.next();
    createUnlinkTmpFile(replica, true, true);
    createUnlinkTmpFile(replica, false, false);
    // Restart: the DN must clean up and finalize all replicas.
    cluster.restartDataNodes();
    cluster.waitActive();
    dn = cluster.getDataNodes().get(0);
    Collection<ReplicaInfo> replicas = dataset(dn).volumeMap.replicas(bpid);
    Assert.assertEquals(4, replicas.size());
    replicasItor = replicas.iterator();
    while (replicasItor.hasNext()) {
      Assert.assertEquals(ReplicaState.FINALIZED, replicasItor.next().getState());
    }
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Test for{@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock,long,long)}
*/
@Test public void testUpdateReplicaUnderRecovery() throws IOException {
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
String bpid=cluster.getNamesystem().getBlockPoolId();
DistributedFileSystem dfs=cluster.getFileSystem();
String filestr="/foo";
Path filepath=new Path(filestr);
DFSTestUtil.createFile(dfs,filepath,1024L,(short)3,0L);
final LocatedBlock locatedblock=getLastLocatedBlock(DFSClientAdapter.getDFSClient(dfs).getNamenode(),filestr);
final DatanodeInfo[] datanodeinfo=locatedblock.getLocations();
Assert.assertTrue(datanodeinfo.length > 0);
final DataNode datanode=cluster.getDataNode(datanodeinfo[0].getIpcPort());
Assert.assertTrue(datanode != null);
final ExtendedBlock b=locatedblock.getBlock();
final long recoveryid=b.getGenerationStamp() + 1;
final long newlength=b.getNumBytes() - 1;
final FsDatasetSpi> fsdataset=DataNodeTestUtils.getFSDataset(datanode);
final ReplicaRecoveryInfo rri=fsdataset.initReplicaRecovery(new RecoveringBlock(b,null,recoveryid));
final ReplicaInfo replica=FsDatasetTestUtil.fetchReplicaInfo(fsdataset,bpid,b.getBlockId());
Assert.assertEquals(ReplicaState.RUR,replica.getState());
FsDatasetImpl.checkReplicaFiles(replica);
{
final ExtendedBlock tmp=new ExtendedBlock(b.getBlockPoolId(),rri.getBlockId(),rri.getNumBytes() - 1,rri.getGenerationStamp());
try {
fsdataset.updateReplicaUnderRecovery(tmp,recoveryid,newlength);
Assert.fail();
}
catch ( IOException ioe) {
System.out.println("GOOD: getting " + ioe);
}
}
final String storageID=fsdataset.updateReplicaUnderRecovery(new ExtendedBlock(b.getBlockPoolId(),rri),recoveryid,newlength);
assertTrue(storageID != null);
}
finally {
if (cluster != null) cluster.shutdown();
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test {@link FsDatasetImpl#initReplicaRecovery(String,ReplicaMap,Block,long,long)}
 */
@Test public void testInitReplicaRecovery() throws IOException {
  final long firstblockid = 10000L;
  final long gs = 7777L;
  final long length = 22L;
  // Build a replica map with a handful of replicas.
  final ReplicaMap map = new ReplicaMap(this);
  String bpid = "BP-TEST";
  final Block[] blocks = new Block[5];
  for (int i = 0; i < blocks.length; i++) {
    blocks[i] = new Block(firstblockid + i, length, gs);
    map.add(bpid, createReplicaInfo(blocks[i]));
  }
  {
    // Normal case: recovery converts the replica to RUR and records the id.
    final Block b = blocks[0];
    final ReplicaInfo originalInfo = map.get(bpid, b);
    final long recoveryid = gs + 1;
    final ReplicaRecoveryInfo recoveryInfo = FsDatasetImpl.initReplicaRecovery(
        bpid, map, blocks[0], recoveryid,
        DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    assertEquals(originalInfo, recoveryInfo);
    final ReplicaUnderRecovery updatedInfo = (ReplicaUnderRecovery) map.get(bpid, b);
    Assert.assertEquals(originalInfo.getBlockId(), updatedInfo.getBlockId());
    Assert.assertEquals(recoveryid, updatedInfo.getRecoveryID());
    // A newer recovery id supersedes the previous one.
    final long recoveryid2 = gs + 2;
    final ReplicaRecoveryInfo recoveryInfo2 = FsDatasetImpl.initReplicaRecovery(
        bpid, map, blocks[0], recoveryid2,
        DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    assertEquals(originalInfo, recoveryInfo2);
    final ReplicaUnderRecovery updatedInfo2 = (ReplicaUnderRecovery) map.get(bpid, b);
    Assert.assertEquals(originalInfo.getBlockId(), updatedInfo2.getBlockId());
    Assert.assertEquals(recoveryid2, updatedInfo2.getRecoveryID());
    // Retrying with the older recovery id must be rejected.
    try {
      FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
          DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      Assert.fail();
    } catch (RecoveryInProgressException ripe) {
      System.out.println("GOOD: getting " + ripe);
    }
  }
  {
    // Unknown block: recovery should report no replica (null).
    final long recoveryid = gs + 1;
    final Block b = new Block(firstblockid - 1, length, gs);
    ReplicaRecoveryInfo r = FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
        DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    Assert.assertNull("Data-node should not have this replica.", r);
  }
  {
    // Recovery id not newer than the replica's generation stamp must fail.
    final long recoveryid = gs - 1;
    final Block b = new Block(firstblockid + 1, length, gs);
    try {
      FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
          DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      Assert.fail();
    } catch (IOException ioe) {
      System.out.println("GOOD: getting " + ioe);
    }
  }
  {
    // Replica's generation stamp older than the block's must fail.
    final long recoveryid = gs + 1;
    final Block b = new Block(firstblockid, length, gs + 1);
    try {
      FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
          DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      fail("InitReplicaRecovery should fail because replica's " + "gs is less than the block's gs");
    } catch (IOException e) {
      // Fixed: the original called startsWith() and discarded the boolean,
      // making the catch a no-op check. Actually assert on the message.
      Assert.assertTrue(e.getMessage().startsWith(
          "replica.getGenerationStamp() < block.getGenerationStamp(), block="));
    }
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/** Xattr access for a second user is governed by ACL entries: READ is needed
 *  to get xattrs, and write permission (here via ALL) to set/remove them. */
@Test(timeout=120000) public void testXAttrAcl() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setOwner(path, BRUCE.getUserName(), null);
  FileSystem fsAsBruce = createFileSystem(BRUCE);
  FileSystem fsAsDiana = createFileSystem(DIANA);
  fsAsBruce.setXAttr(path, name1, value1);
  // Fixed: parameterized map type instead of a raw Map (required for the
  // byte[] lookup passed to assertArrayEquals below).
  Map<String, byte[]> xattrs;
  try {
    xattrs = fsAsDiana.getXAttrs(path);
    Assert.fail("Diana should not have read access to get xattrs");
  } catch (AccessControlException e) {
    // expected: Diana has no READ permission yet
  }
  // Grant Diana read access; getting xattrs should now succeed.
  fsAsBruce.modifyAclEntries(path,
      Lists.newArrayList(aclEntry(ACCESS, USER, DIANA.getUserName(), READ)));
  xattrs = fsAsDiana.getXAttrs(path);
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  // READ alone is not enough to modify xattrs.
  try {
    fsAsDiana.removeXAttr(path, name1);
    Assert.fail("Diana should not have write access to remove xattrs");
  } catch (AccessControlException e) {
    // expected
  }
  try {
    fsAsDiana.setXAttr(path, name2, value2);
    Assert.fail("Diana should not have write access to set xattrs");
  } catch (AccessControlException e) {
    // expected
  }
  // With ALL permissions Diana can both set and remove xattrs.
  fsAsBruce.modifyAclEntries(path,
      Lists.newArrayList(aclEntry(ACCESS, USER, DIANA.getUserName(), ALL)));
  fsAsDiana.setXAttr(path, name2, value2);
  Assert.assertArrayEquals(value2, fsAsDiana.getXAttrs(path).get(name2));
  fsAsDiana.removeXAttr(path, name1);
  fsAsDiana.removeXAttr(path, name2);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testWebHdfsAuditLogger() throws IOException, URISyntaxException {
// Verify that the configured audit logger records webhdfs requests and
// reports the correct remote address, honoring X-Forwarded-For only once
// the caller is registered as a proxy server.
Configuration conf=new HdfsConfiguration();
conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,DummyAuditLogger.class.getName());
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
GetOpParam.Op op=GetOpParam.Op.GETFILESTATUS;
try {
cluster.waitClusterUp();
assertTrue(DummyAuditLogger.initialized);
URI uri=new URI("http",NetUtils.getHostPortString(cluster.getNameNode().getHttpAddress()),"/webhdfs/v1/",op.toQueryString(),null);
// Plain request: the audit log records the socket address (127.0.0.1).
HttpURLConnection conn=(HttpURLConnection)uri.toURL().openConnection();
conn.setRequestMethod(op.getType().toString());
conn.connect();
assertEquals(200,conn.getResponseCode());
conn.disconnect();
assertEquals(1,DummyAuditLogger.logCount);
assertEquals("127.0.0.1",DummyAuditLogger.remoteAddr);
// X-Forwarded-For from a client that is not a known proxy is ignored.
conn=(HttpURLConnection)uri.toURL().openConnection();
conn.setRequestMethod(op.getType().toString());
conn.setRequestProperty("X-Forwarded-For","1.1.1.1");
conn.connect();
assertEquals(200,conn.getResponseCode());
conn.disconnect();
assertEquals(2,DummyAuditLogger.logCount);
assertEquals("127.0.0.1",DummyAuditLogger.remoteAddr);
// Register 127.0.0.1 as a proxy server; the forwarded address is now used.
conf.set(ProxyServers.CONF_HADOOP_PROXYSERVERS,"127.0.0.1");
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
conn=(HttpURLConnection)uri.toURL().openConnection();
conn.setRequestMethod(op.getType().toString());
conn.setRequestProperty("X-Forwarded-For","1.1.1.1");
conn.connect();
assertEquals(200,conn.getResponseCode());
conn.disconnect();
assertEquals(3,DummyAuditLogger.logCount);
assertEquals("1.1.1.1",DummyAuditLogger.remoteAddr);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier
/**
 * test that denied access via webhdfs puts proper entry in audit log
 */
@Test public void testAuditWebHdfsDenied() throws Exception {
  final Path target = new Path(fnames[0]);
  // Make the file unreadable by the webhdfs test user.
  fs.setPermission(target, new FsPermission((short) 0600));
  fs.setOwner(target, "root", null);
  setupAuditLogs();
  try {
    WebHdfsFileSystem webfs =
        WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
    InputStream in = webfs.open(target);
    int val = in.read();
    fail("open+read must not succeed, got " + val);
  } catch (AccessControlException e) {
    System.out.println("got access denied, as expected.");
  }
  // The denial must show up as a "denied" audit entry.
  verifyAuditLogsRepeat(false, 2);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * test that allowed stat puts proper entry in audit log
 */
@Test public void testAuditAllowedStat() throws Exception {
  final Path target = new Path(fnames[0]);
  FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
  setupAuditLogs();
  // A permitted stat must be recorded as an allowed audit event.
  FileStatus status = userfs.getFileStatus(target);
  verifyAuditLogs(true);
  assertTrue("failed to stat file", status != null && status.isFile());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * test that stat via webhdfs puts proper entry in audit log
 */
@Test public void testAuditWebHdfsStat() throws Exception {
  final Path target = new Path(fnames[0]);
  // World-readable file owned by root so the webhdfs user may stat it.
  fs.setPermission(target, new FsPermission((short) 0644));
  fs.setOwner(target, "root", null);
  setupAuditLogs();
  WebHdfsFileSystem webfs =
      WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
  FileStatus status = webfs.getFileStatus(target);
  verifyAuditLogs(true);
  assertTrue("failed to stat file", status != null && status.isFile());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * test that access via webhdfs puts proper entry in audit log
 */
@Test public void testAuditWebHdfs() throws Exception {
  final Path target = new Path(fnames[0]);
  // World-readable file owned by root so the webhdfs user may open it.
  fs.setPermission(target, new FsPermission((short) 0644));
  fs.setOwner(target, "root", null);
  setupAuditLogs();
  WebHdfsFileSystem webfs =
      WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
  InputStream in = webfs.open(target);
  int firstByte = in.read();
  in.close();
  verifyAuditLogsRepeat(true, 3);
  assertTrue("failed to read from file", firstByte >= 0);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * test that allowed operation puts proper entry in audit log
 */
@Test public void testAuditAllowed() throws Exception {
  final Path target = new Path(fnames[0]);
  FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
  setupAuditLogs();
  // A permitted open+read must produce an allowed audit entry.
  InputStream in = userfs.open(target);
  int firstByte = in.read();
  in.close();
  verifyAuditLogs(true);
  assertTrue("failed to read from file", firstByte >= 0);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Ensure that the backupnode will tail edits from the NN
 * and keep in sync, even while the NN rolls, checkpoints
 * occur, etc.
 */
@Test public void testBackupNodeTailsEdits() throws Exception {
Configuration conf=new HdfsConfiguration();
HAUtil.setAllowStandbyReads(conf,true);
MiniDFSCluster cluster=null;
FileSystem fileSys=null;
BackupNode backup=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
fileSys=cluster.getFileSystem();
backup=startBackupNode(conf,StartupOption.BACKUP,1);
BackupImage bnImage=(BackupImage)backup.getFSImage();
testBNInSync(cluster,backup,1);
// Roll the edit log on the NN; the BN must move to the same segment.
NameNode nn=cluster.getNameNode();
NamenodeProtocols nnRpc=nn.getRpcServer();
nnRpc.rollEditLog();
assertEquals(bnImage.getEditLog().getCurSegmentTxId(),nn.getFSImage().getEditLog().getCurSegmentTxId());
testBNInSync(cluster,backup,2);
// A BN checkpoint must deliver a newer image to the NN.
long nnImageBefore=nn.getFSImage().getStorage().getMostRecentCheckpointTxId();
backup.doCheckpoint();
long nnImageAfter=nn.getFSImage().getStorage().getMostRecentCheckpointTxId();
assertTrue("nn should have received new checkpoint. before: " + nnImageBefore + " after: "+ nnImageAfter,nnImageAfter > nnImageBefore);
testBNInSync(cluster,backup,3);
// Stop the BN; its latest edits log must be the same in-progress segment
// the NN is currently writing to.
StorageDirectory sd=bnImage.getStorage().getStorageDir(0);
backup.stop();
backup=null;
EditLogFile editsLog=FSImageTestUtil.findLatestEditsLog(sd);
assertEquals(editsLog.getFirstTxId(),nn.getFSImage().getEditLog().getCurSegmentTxId());
assertTrue("Should not have finalized " + editsLog,editsLog.isInProgress());
// Make a namespace change while the BN is down, restart it, and verify
// that it catches up with the edit it missed.
assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down")));
backup=startBackupNode(conf,StartupOption.BACKUP,1);
testBNInSync(cluster,backup,4);
assertNotNull(backup.getNamesystem().getFileInfo("/edit-while-bn-down",false));
backup.stop(false);
assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down-2")));
}
finally {
LOG.info("Shutting down...");
if (backup != null) backup.stop();
if (fileSys != null) fileSys.close();
if (cluster != null) cluster.shutdown();
}
// Compare on-disk storage of NN and BN after everything is stopped.
assertStorageDirsMatch(cluster.getNameNode(),backup);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify that a file can be read both from NameNode and BackupNode.
 */
@Test public void testCanReadData() throws IOException {
  Path file1 = new Path("/fileToRead.dat");
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  BackupNode backup = null;
  try {
    // Start NameNode and BackupNode and wait for the initial checkpoint.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    fileSys = cluster.getFileSystem();
    long txid = cluster.getNameNodeRpc().getTransactionID();
    backup = startBackupNode(conf, StartupOption.BACKUP, 1);
    waitCheckpointDone(cluster, txid);
    // Configure an HA-style nameservice with the NN and BN as its two nodes.
    String rpcAddrKeyPreffix = DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".bnCluster";
    String nnAddr = cluster.getNameNode().getNameNodeAddressHostPortString();
    conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
    String bnAddr = backup.getNameNodeAddressHostPortString();
    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "bnCluster");
    conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, "bnCluster");
    conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + ".bnCluster", "nnActive, nnBackup");
    conf.set(rpcAddrKeyPreffix + ".nnActive", nnAddr);
    conf.set(rpcAddrKeyPreffix + ".nnBackup", bnAddr);
    cluster.startDataNodes(conf, 3, true, StartupOption.REGULAR, null);
    DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, (short) 3, seed);
    // Read through both nodes and compare the contents.
    FileSystem bnFS = FileSystem.get(new Path("hdfs://" + bnAddr).toUri(), conf);
    String nnData = DFSTestUtil.readFile(fileSys, file1);
    String bnData = DFSTestUtil.readFile(bnFS, file1);
    assertEquals("Data read from BackupNode and NameNode is not the same.", nnData, bnData);
  } catch (IOException e) {
    LOG.error("Error in TestBackupNode: ", e);
    // Fixed: fail() instead of the assertTrue(msg, false) anti-pattern.
    fail(e.getLocalizedMessage());
  } finally {
    if (fileSys != null) fileSys.close();
    if (backup != null) backup.stop();
    if (cluster != null) cluster.shutdown();
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);
  // Write half a block so the file has one block under construction.
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);
  for (int i = 1; i < NUM_BLOCKS; ) {
    // getBlockLocations must report every block written so far, with the
    // last one still under construction.
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    // Fixed: raw List replaced with the parameterized element type.
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoUnderConstruction);
    if (++i < NUM_BLOCKS) {
      // Append a full block and check again on the next iteration.
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  out.close();
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Cache pools and directives must survive checkpointing, fsimage save, and
 *  a NameNode restart, with directive ids continuing from where they left off. */
@Test(timeout=60000) public void testCacheManagerRestart() throws Exception {
  SecondaryNameNode secondary = null;
  try {
    // Start a secondary namenode so cache state can be checkpointed.
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    secondary = new SecondaryNameNode(conf);
    // Create a cache pool and verify its attributes round-trip.
    final String pool = "poolparty";
    String groupName = "partygroup";
    FsPermission mode = new FsPermission((short) 0777);
    long limit = 747;
    dfs.addCachePool(new CachePoolInfo(pool)
        .setGroupName(groupName).setMode(mode).setLimit(limit));
    // Fixed: raw RemoteIterator replaced with parameterized entry types.
    RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
    assertTrue("No cache pools found", pit.hasNext());
    CachePoolInfo info = pit.next().getInfo();
    assertEquals(pool, info.getPoolName());
    assertEquals(groupName, info.getGroupName());
    assertEquals(mode, info.getMode());
    assertEquals(limit, (long) info.getLimit());
    assertFalse("Unexpected # of cache pools found", pit.hasNext());
    // Create several cache directives in the pool.
    int numEntries = 10;
    String entryPrefix = "/party-";
    long prevId = -1;
    final Date expiry = new Date();
    for (int i = 0; i < numEntries; i++) {
      prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
          .setPath(new Path(entryPrefix + i)).setPool(pool)
          .setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime()))
          .build());
    }
    RemoteIterator<CacheDirectiveEntry> dit = dfs.listCacheDirectives(null);
    for (int i = 0; i < numEntries; i++) {
      assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
      CacheDirectiveInfo cd = dit.next().getInfo();
      assertEquals(i + 1, cd.getId().longValue());
      assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
      assertEquals(pool, cd.getPool());
    }
    assertFalse("Unexpected # of cache directives found", dit.hasNext());
    // Checkpoint, then add cache state that only exists in the new fsimage.
    secondary.doCheckpoint();
    final String imagePool = "imagePool";
    dfs.addCachePool(new CachePoolInfo(imagePool));
    prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/image")).setPool(imagePool).build());
    // Save a new fsimage and force the secondary to fetch it.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    dfs.saveNamespace();
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    boolean fetchImage = secondary.doCheckpoint();
    assertTrue("Secondary should have fetched a new fsimage from NameNode", fetchImage);
    dfs.removeCachePool(imagePool);
    // Restart the namenode and verify all cache state survived.
    // (Fixed: a duplicated assertEquals on the pool name was removed.)
    cluster.restartNameNode();
    pit = dfs.listCachePools();
    assertTrue("No cache pools found", pit.hasNext());
    info = pit.next().getInfo();
    assertEquals(pool, info.getPoolName());
    assertEquals(groupName, info.getGroupName());
    assertEquals(mode, info.getMode());
    assertEquals(limit, (long) info.getLimit());
    assertFalse("Unexpected # of cache pools found", pit.hasNext());
    dit = dfs.listCacheDirectives(null);
    for (int i = 0; i < numEntries; i++) {
      assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
      CacheDirectiveInfo cd = dit.next().getInfo();
      assertEquals(i + 1, cd.getId().longValue());
      assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
      assertEquals(pool, cd.getPool());
      assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
    }
    assertFalse("Unexpected # of cache directives found", dit.hasNext());
    // Directive ids must continue incrementing across the restart.
    long nextId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/foobar")).setPool(pool).build());
    assertEquals(prevId + 1, nextId);
  } finally {
    if (secondary != null) {
      secondary.shutdown();
    }
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises the full lifecycle of cache directives: adding (including
 * duplicate, unauthorized, malformed-path, and empty-pool-name cases),
 * listing with pool/id filters, modifying replication, and removing
 * (including non-existent and negative IDs).
 */
@Test(timeout=60000) public void testAddRemoveDirectives() throws Exception {
  // Three pools anyone can write to, plus one pool (mode 0) nobody can touch.
  proto.addCachePool(new CachePoolInfo("pool1").setMode(new FsPermission((short)0777)));
  proto.addCachePool(new CachePoolInfo("pool2").setMode(new FsPermission((short)0777)));
  proto.addCachePool(new CachePoolInfo("pool3").setMode(new FsPermission((short)0777)));
  proto.addCachePool(new CachePoolInfo("pool4").setMode(new FsPermission((short)0)));
  CacheDirectiveInfo alpha=new CacheDirectiveInfo.Builder().setPath(new Path("/alpha")).setPool("pool1").build();
  CacheDirectiveInfo beta=new CacheDirectiveInfo.Builder().setPath(new Path("/beta")).setPool("pool2").build();
  CacheDirectiveInfo delta=new CacheDirectiveInfo.Builder().setPath(new Path("/delta")).setPool("pool1").build();
  // Re-adding the same directive must yield a distinct ID.
  long alphaId=addAsUnprivileged(alpha);
  long alphaId2=addAsUnprivileged(alpha);
  assertFalse("Expected to get unique directives when re-adding an " + "existing CacheDirectiveInfo",alphaId == alphaId2);
  long betaId=addAsUnprivileged(beta);
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/unicorn")).setPool("no_such_pool").build());
    fail("expected an error when adding to a non-existent pool.");
  }
  catch ( InvalidRequestException ioe) {
    GenericTestUtils.assertExceptionContains("Unknown pool",ioe);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/blackhole")).setPool("pool4").build());
    fail("expected an error when adding to a pool with " + "mode 0 (no permissions for anyone).");
  }
  catch ( AccessControlException e) {
    GenericTestUtils.assertExceptionContains("Permission denied while accessing pool",e);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/illegal:path/")).setPool("pool1").build());
    fail("expected an error when adding a malformed path " + "to the cache directives.");
  }
  catch ( IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("is not a valid DFS filename",e);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/emptypoolname")).setReplication((short)1).setPool("").build());
    fail("expected an error when adding a cache " + "directive with an empty pool name.");
  }
  catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Invalid empty pool name",e);
  }
  long deltaId=addAsUnprivileged(delta);
  // A relative path is resolved against the working directory.
  long relativeId=addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("relative")).setPool("pool1").build());
  // FIX: declare the iterator with its element type; with the raw type,
  // next() returns Object and .getInfo() below would not compile.
  RemoteIterator<CacheDirectiveEntry> iter;
  iter=dfs.listCacheDirectives(null);
  validateListAll(iter,alphaId,alphaId2,betaId,deltaId,relativeId);
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool3").build());
  assertFalse(iter.hasNext());
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool1").build());
  validateListAll(iter,alphaId,alphaId2,deltaId,relativeId);
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
  validateListAll(iter,betaId);
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(alphaId2).build());
  validateListAll(iter,alphaId2);
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(relativeId).build());
  validateListAll(iter,relativeId);
  // Removal makes the directive disappear from listings...
  dfs.removeCacheDirective(betaId);
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
  assertFalse(iter.hasNext());
  // ...and a second removal of the same ID is rejected.
  try {
    dfs.removeCacheDirective(betaId);
    fail("expected an error when removing a non-existent ID");
  }
  catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("No directive with ID",e);
  }
  try {
    // FIX: uppercase 'L' suffix; lowercase 'l' is easily misread as '1'.
    proto.removeCacheDirective(-42L);
    fail("expected an error when removing a negative ID");
  }
  catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Invalid negative ID",e);
  }
  try {
    proto.removeCacheDirective(43L);
    fail("expected an error when removing a non-existent ID");
  }
  catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("No directive with ID",e);
  }
  dfs.removeCacheDirective(alphaId);
  dfs.removeCacheDirective(alphaId2);
  dfs.removeCacheDirective(deltaId);
  // Modify the sole remaining directive and verify the change is visible.
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(relativeId).setReplication((short)555).build());
  iter=dfs.listCacheDirectives(null);
  assertTrue(iter.hasNext());
  CacheDirectiveInfo modified=iter.next().getInfo();
  assertEquals(relativeId,modified.getId().longValue());
  assertEquals((short)555,modified.getReplication().shortValue());
  dfs.removeCacheDirective(relativeId);
  iter=dfs.listCacheDirectives(null);
  assertFalse(iter.hasNext());
  // "." as a path must also round-trip through add/modify/remove.
  CacheDirectiveInfo directive=new CacheDirectiveInfo.Builder().setPath(new Path(".")).setPool("pool1").build();
  long id=dfs.addCacheDirective(directive);
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(directive).setId(id).setReplication((short)2).build());
  dfs.removeCacheDirective(id);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that listCachePools hides owner/group/mode/limit metadata from a
 * user without read access to the pool, and exposes it once that user is
 * made the pool's owner.
 */
@Test(timeout=60000) public void testListCachePoolPermissions() throws Exception {
  final UserGroupInformation myUser=UserGroupInformation.createRemoteUser("myuser");
  final DistributedFileSystem myDfs=(DistributedFileSystem)DFSTestUtil.getFileSystemAs(myUser,conf);
  final String poolName="poolparty";
  // Mode 0700: only the creating (super)user may read the pool's metadata.
  dfs.addCachePool(new CachePoolInfo(poolName).setMode(new FsPermission((short)0700)));
  // FIX: declare the iterator with its element type; with the raw type,
  // next() returns Object and .getInfo() below would not compile.
  RemoteIterator<CachePoolEntry> it=myDfs.listCachePools();
  CachePoolInfo info=it.next().getInfo();
  assertFalse(it.hasNext());
  // An unauthorized user sees the pool name but none of its metadata.
  assertEquals("Expected pool name",poolName,info.getPoolName());
  assertNull("Unexpected owner name",info.getOwnerName());
  assertNull("Unexpected group name",info.getGroupName());
  assertNull("Unexpected mode",info.getMode());
  assertNull("Unexpected limit",info.getLimit());
  final long limit=99;
  // Hand ownership to myuser; the metadata should now be fully visible.
  dfs.modifyCachePool(new CachePoolInfo(poolName).setOwnerName(myUser.getShortUserName()).setLimit(limit));
  it=myDfs.listCachePools();
  info=it.next().getInfo();
  assertFalse(it.hasNext());
  assertEquals("Expected pool name",poolName,info.getPoolName());
  assertEquals("Mismatched owner name",myUser.getShortUserName(),info.getOwnerName());
  assertNotNull("Expected group name",info.getGroupName());
  assertEquals("Mismatched mode",(short)0700,info.getMode().toShort());
  assertEquals("Mismatched limit",limit,(long)info.getLimit());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests save namespace.
 *
 * Issues delegation tokens, runs "-saveNamespace" via DFSAdmin while in
 * safe mode, then restarts the NameNode (without reformatting) several
 * times to confirm that tokens issued before and after each save survive
 * the restarts and can still be renewed and cancelled.
 */
@Test public void testSaveNamespace() throws IOException {
  DistributedFileSystem fs=null;
  try {
    Configuration conf=new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,true);
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fs=cluster.getFileSystem();
    FSNamesystem namesystem=cluster.getNamesystem();
    String renewer=UserGroupInformation.getLoginUser().getUserName();
    Token token1=namesystem.getDelegationToken(new Text(renewer));
    Token token2=namesystem.getDelegationToken(new Text(renewer));
    DFSAdmin admin=new DFSAdmin(conf);
    String[] args=new String[]{"-saveNamespace"};
    NameNode nn=cluster.getNameNode();
    for ( StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
      EditLogFile log=FSImageTestUtil.findLatestEditsLog(sd);
      assertTrue(log.isInProgress());
      log.validateLog();
      long numTransactions=(log.getLastTxId() - log.getFirstTxId()) + 1;
      // The segment-start txn plus the token operations above are expected
      // to total 5 transactions — TODO(review) confirm the exact breakdown.
      assertEquals("In-progress log " + log + " should have 5 transactions",5,numTransactions);
    }
    // saveNamespace requires safe mode.
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      admin.run(args);
    }
    catch ( Exception e) {
      // FIX: preserve the original exception as the cause instead of
      // keeping only its message.
      throw new IOException(e);
    }
    // After the save, each dir should hold a fresh segment with only the
    // START_LOG_SEGMENT transaction.
    for ( StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
      EditLogFile log=FSImageTestUtil.findLatestEditsLog(sd);
      assertTrue(log.isInProgress());
      log.validateLog();
      long numTransactions=(log.getLastTxId() - log.getFirstTxId()) + 1;
      assertEquals("In-progress log " + log + " should only have START txn",1,numTransactions);
    }
    // Restart from the saved image; pre-save tokens must still be valid.
    cluster.shutdown();
    cluster=null;
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    try {
      renewToken(token1);
      renewToken(token2);
    }
    catch ( IOException e) {
      fail("Could not renew or cancel the token: " + e);
    }
    namesystem=cluster.getNamesystem();
    Token token3=namesystem.getDelegationToken(new Text(renewer));
    Token token4=namesystem.getDelegationToken(new Text(renewer));
    // Restart again without an intervening save: tokens 3 and 4 live only
    // in the edit log and must be replayed correctly.
    cluster.shutdown();
    cluster=null;
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    namesystem=cluster.getNamesystem();
    Token token5=namesystem.getDelegationToken(new Text(renewer));
    try {
      renewToken(token1);
      renewToken(token2);
      renewToken(token3);
      renewToken(token4);
      renewToken(token5);
    }
    catch ( IOException e) {
      fail("Could not renew or cancel the token: " + e);
    }
    // One final restart; every token must renew and then cancel cleanly.
    cluster.shutdown();
    cluster=null;
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    namesystem=cluster.getNamesystem();
    try {
      renewToken(token1);
      cancelToken(token1);
      renewToken(token2);
      cancelToken(token2);
      renewToken(token3);
      cancelToken(token3);
      renewToken(token4);
      cancelToken(token4);
      renewToken(token5);
      cancelToken(token5);
    }
    catch ( IOException e) {
      fail("Could not renew or cancel the token: " + e);
    }
  }
  finally {
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Verifies that a 2NN whose checkpoint fails mid-merge (via fault
 * injection) recovers by reloading a fresh fsimage on the next checkpoint.
 */
@Test(timeout=30000) public void testReloadOnEditReplayFailure() throws IOException {
  Configuration conf=new HdfsConfiguration();
  FSDataOutputStream fos=null;
  SecondaryNameNode secondary=null;
  MiniDFSCluster cluster=null;
  FileSystem fs=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fs=cluster.getFileSystem();
    secondary=startSecondaryNameNode(conf);
    fos=fs.create(new Path("tmpfile0"));
    fos.write(new byte[]{0,1,2,3});
    // First checkpoint succeeds normally.
    secondary.doCheckpoint();
    fos.write(new byte[]{0,1,2,3});
    fos.hsync();
    // Make the next merge fail inside the 2NN.
    Mockito.doThrow(new IOException("Injecting failure during merge")).when(faultInjector).duringMerge();
    try {
      secondary.doCheckpoint();
      fail("Fault injection failed.");
    }
    catch ( IOException ioe) {
      // FIX: assert the injected failure actually propagated instead of
      // silently swallowing any IOException (matches the sibling tests).
      GenericTestUtils.assertExceptionContains("Injecting failure during merge",ioe);
    }
    Mockito.reset(faultInjector);
    fos.write(new byte[]{0,1,2,3});
    fos.hsync();
    // The 2NN's in-memory state is now suspect, so the next checkpoint must
    // re-download and reload the image.
    assertTrue("Another checkpoint should have reloaded image",secondary.doCheckpoint());
  }
  finally {
    if (fs != null) {
      fs.close();
    }
    cleanup(secondary);
    secondary=null;
    cleanup(cluster);
    cluster=null;
    Mockito.reset(faultInjector);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier PublicFieldVerifier HybridVerifier
/**
 * Tests checkpoint in HDFS.
 *
 * Runs three cluster generations: (1) create a file and checkpoint,
 * verifying the NN transfer metrics; (2) restart, replace the file,
 * checkpoint again and verify the 2NN's inode map is consistent with its
 * root dir; (3) restart once more and verify the namespace reflects all
 * prior changes.
 */
@Test public void testCheckpoint() throws IOException {
  Path file1=new Path("checkpoint.dat");
  Path file2=new Path("checkpoint2.dat");
  Configuration conf=new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,"0.0.0.0:0");
  replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
  MiniDFSCluster cluster=null;
  FileSystem fileSys=null;
  SecondaryNameNode secondary=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fileSys=cluster.getFileSystem();
    // FIX: assertFalse instead of assertTrue(!...).
    assertFalse(fileSys.exists(file1));
    assertFalse(fileSys.exists(file2));
    DFSTestUtil.createFile(fileSys,file1,fileSize,fileSize,blockSize,replication,seed);
    checkFile(fileSys,file1,replication);
    secondary=startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    // The checkpoint must have bumped the image/edit transfer metrics.
    MetricsRecordBuilder rb=getMetrics(NN_METRICS);
    assertCounterGt("GetImageNumOps",0,rb);
    assertCounterGt("GetEditNumOps",0,rb);
    assertCounterGt("PutImageNumOps",0,rb);
    assertGaugeGt("GetImageAvgTime",0.0,rb);
    assertGaugeGt("GetEditAvgTime",0.0,rb);
    assertGaugeGt("PutImageAvgTime",0.0,rb);
  }
  finally {
    // FIX: guard against NPE when cluster startup failed before fileSys was
    // assigned — an NPE here would mask the original exception.
    if (fileSys != null) {
      fileSys.close();
    }
    cleanup(secondary);
    secondary=null;
    cleanup(cluster);
    cluster=null;
  }
  Path tmpDir=new Path("/tmp_tmp");
  try {
    // Second generation: restart without reformatting.
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    fileSys=cluster.getFileSystem();
    checkFile(fileSys,file1,replication);
    cleanupFile(fileSys,file1);
    DFSTestUtil.createFile(fileSys,file2,fileSize,fileSize,blockSize,replication,seed);
    checkFile(fileSys,file2,replication);
    secondary=startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    // The root inode fetched through the inode map must be the very same
    // object as the directory tree's root (regression check).
    FSDirectory secondaryFsDir=secondary.getFSNamesystem().dir;
    INode rootInMap=secondaryFsDir.getInode(secondaryFsDir.rootDir.getId());
    assertSame(rootInMap,secondaryFsDir.rootDir);
    fileSys.delete(tmpDir,true);
    fileSys.mkdirs(tmpDir);
    secondary.doCheckpoint();
  }
  finally {
    if (fileSys != null) {
      fileSys.close();
    }
    cleanup(secondary);
    secondary=null;
    cleanup(cluster);
    cluster=null;
  }
  // Third generation: verify the namespace reflects all prior operations.
  cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
  cluster.waitActive();
  fileSys=cluster.getFileSystem();
  assertFalse(fileSys.exists(file1));
  assertTrue(fileSys.exists(tmpDir));
  try {
    checkFile(fileSys,file2,replication);
  }
  finally {
    fileSys.close();
    cluster.shutdown();
    cluster=null;
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that the secondary doesn't have to re-download image
 * if it hasn't changed.
 *
 * The first checkpoint must fetch the original image and write a new one;
 * a second checkpoint after a small namespace change must merge edits
 * without re-downloading, producing a strictly larger image.
 */
@Test public void testSecondaryImageDownload() throws IOException {
  LOG.info("Starting testSecondaryImageDownload");
  Configuration conf=new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,"0.0.0.0:0");
  Path dir=new Path("/checkpoint");
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
  cluster.waitActive();
  FileSystem fileSys=cluster.getFileSystem();
  FSImage image=cluster.getNameNode().getFSImage();
  SecondaryNameNode secondary=null;
  try {
    // FIX: assertFalse instead of assertTrue(!...).
    assertFalse(fileSys.exists(dir));
    secondary=startSecondaryNameNode(conf);
    File secondaryDir=new File(MiniDFSCluster.getBaseDirectory(),"namesecondary1");
    File secondaryCurrent=new File(secondaryDir,"current");
    long expectedTxIdToDownload=cluster.getNameNode().getFSImage().getStorage().getMostRecentCheckpointTxId();
    File secondaryFsImageBefore=new File(secondaryCurrent,NNStorage.getImageFileName(expectedTxIdToDownload));
    File secondaryFsImageAfter=new File(secondaryCurrent,NNStorage.getImageFileName(expectedTxIdToDownload + 2));
    assertFalse("Secondary should start with empty current/ dir " + "but " + secondaryFsImageBefore + " exists",secondaryFsImageBefore.exists());
    // First checkpoint: downloads the image and produces a new one.
    assertTrue("Secondary should have loaded an image",secondary.doCheckpoint());
    assertTrue("Secondary should have downloaded original image",secondaryFsImageBefore.exists());
    assertTrue("Secondary should have created a new image",secondaryFsImageAfter.exists());
    long fsimageLength=secondaryFsImageBefore.length();
    assertEquals("Image size should not have changed",fsimageLength,secondaryFsImageAfter.length());
    // Change the namespace, then checkpoint again: no re-download expected.
    fileSys.mkdirs(dir);
    assertFalse("Another checkpoint should not have to re-load image",secondary.doCheckpoint());
    for ( StorageDirectory sd : image.getStorage().dirIterable(NameNodeDirType.IMAGE)) {
      File imageFile=NNStorage.getImageFile(sd,NameNodeFile.IMAGE,expectedTxIdToDownload + 5);
      assertTrue("Image size increased",imageFile.length() > fsimageLength);
    }
  }
  finally {
    fileSys.close();
    cleanup(secondary);
    secondary=null;
    cleanup(cluster);
    cluster=null;
  }
}
APIUtilityVerifier UtilityVerifier
/**
 * Verifies that the NameNode refuses to start when one of its name
 * directories has been made unwritable, and that the failure message names
 * the inaccessible storage directory.
 */
@Test public void testNameDirError() throws IOException {
  LOG.info("Starting testNameDirError");
  Configuration conf=new HdfsConfiguration();
  // Start a NN once just to discover where its name dirs live.
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  // FIX: use the parameterized type; iterating a raw Collection as URI
  // would not compile.
  Collection<URI> nameDirs=cluster.getNameDirs(0);
  cluster.shutdown();
  cluster=null;
  for ( URI nameDirUri : nameDirs) {
    File dir=new File(nameDirUri.getPath());
    try {
      // Make this one name dir unwritable; startup should now fail.
      FileUtil.setWritable(dir,false);
      cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
      fail("NN should have failed to start with " + dir + " set unreadable");
    }
    catch ( IOException ioe) {
      GenericTestUtils.assertExceptionContains("storage directory does not exist or is not accessible",ioe);
    }
    finally {
      // Always restore permissions so the next iteration can reuse the dir.
      cleanup(cluster);
      cluster=null;
      FileUtil.setWritable(dir,true);
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Starts two namenodes and two secondary namenodes, verifies that secondary
 * namenodes are configured correctly to talk to their respective namenodes
 * and can do the checkpoint.
 * @throws IOException
 */
@Test public void testMultipleSecondaryNamenodes() throws IOException {
  Configuration conf=new HdfsConfiguration();
  String nameserviceId1="ns1";
  String nameserviceId2="ns2";
  conf.set(DFSConfigKeys.DFS_NAMESERVICES,nameserviceId1 + "," + nameserviceId2);
  MiniDFSCluster cluster=null;
  SecondaryNameNode secondary1=null;
  SecondaryNameNode secondary2=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(conf.get(DFSConfigKeys.DFS_NAMESERVICES))).build();
    Configuration snConf1=new HdfsConfiguration(cluster.getConfiguration(0));
    Configuration snConf2=new HdfsConfiguration(cluster.getConfiguration(1));
    InetSocketAddress nn1RpcAddress=cluster.getNameNode(0).getNameNodeAddress();
    InetSocketAddress nn2RpcAddress=cluster.getNameNode(1).getNameNodeAddress();
    String nn1=nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort();
    String nn2=nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort();
    // Blank the un-suffixed service address so each 2NN must resolve its NN
    // through the nameservice-suffixed key alone.
    snConf1.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,"");
    snConf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,"");
    snConf1.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,nameserviceId1),nn1);
    snConf2.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,nameserviceId2),nn2);
    secondary1=startSecondaryNameNode(snConf1);
    secondary2=startSecondaryNameNode(snConf2);
    // FIX: JUnit convention is assertEquals(expected, actual); the NN's
    // port is the expected value here.
    assertEquals(nn1RpcAddress.getPort(),secondary1.getNameNodeAddress().getPort());
    assertEquals(nn2RpcAddress.getPort(),secondary2.getNameNodeAddress().getPort());
    // The two 2NNs must not have attached to the same NN.
    assertTrue(secondary1.getNameNodeAddress().getPort() != secondary2.getNameNodeAddress().getPort());
    secondary1.doCheckpoint();
    secondary2.doCheckpoint();
  }
  finally {
    cleanup(secondary1);
    secondary1=null;
    cleanup(secondary2);
    secondary2=null;
    cleanup(cluster);
    cluster=null;
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that a 2NN whose merges keep failing (via fault injection)
 * gives up and exits after exceeding the configured retry limit.
 */
@Test(timeout=30000) public void testTooManyEditReplayFailures() throws IOException {
  Configuration conf=new HdfsConfiguration();
  // FIX: these are integer-valued keys; use setInt for consistency with the
  // other tests rather than raw string values.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_KEY,1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY,1);
  FSDataOutputStream fos=null;
  SecondaryNameNode secondary=null;
  MiniDFSCluster cluster=null;
  FileSystem fs=null;
  try {
    // checkExitOnShutdown(false) lets us observe the 2NN's ExitException
    // instead of it killing the JVM.
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).checkExitOnShutdown(false).build();
    cluster.waitActive();
    fs=cluster.getFileSystem();
    fos=fs.create(new Path("tmpfile0"));
    fos.write(new byte[]{0,1,2,3});
    // Every merge attempt will fail from now on.
    Mockito.doThrow(new IOException("Injecting failure during merge")).when(faultInjector).duringMerge();
    secondary=startSecondaryNameNode(conf);
    secondary.doWork();
    fail("2NN did not exit.");
  }
  catch ( ExitException ee) {
    ExitUtil.resetFirstExitException();
    // FIX: guard against an NPE (masking the real failure) if the 2NN never
    // started before the ExitException was thrown.
    assertNotNull("2NN should have started before exiting",secondary);
    assertEquals("Max retries",1,secondary.getMergeErrorCount() - 1);
  }
  finally {
    if (fs != null) {
      fs.close();
    }
    cleanup(secondary);
    secondary=null;
    cleanup(cluster);
    cluster=null;
    Mockito.reset(faultInjector);
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test that a fault while downloading edits does not prevent future
 * checkpointing
 *
 * After the injected failure a .tmp edits file is left behind in each 2NN
 * edits dir; the test truncates it and verifies the next checkpoint still
 * succeeds.
 */
@Test(timeout=30000) public void testEditFailureBeforeRename() throws IOException {
  Configuration conf=new HdfsConfiguration();
  SecondaryNameNode secondary=null;
  MiniDFSCluster cluster=null;
  FileSystem fs=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fs=cluster.getFileSystem();
    secondary=startSecondaryNameNode(conf);
    // FIX: uppercase 'L' literal suffix (lowercase 'l' reads as '1').
    DFSTestUtil.createFile(fs,new Path("tmpfile0"),1024,(short)1,0L);
    secondary.doCheckpoint();
    // Fail the next checkpoint just before the downloaded edits are renamed
    // into place.
    Mockito.doThrow(new IOException("Injecting failure before edit rename")).when(faultInjector).beforeEditsRename();
    DFSTestUtil.createFile(fs,new Path("tmpfile1"),1024,(short)1,0L);
    try {
      secondary.doCheckpoint();
      fail("Fault injection failed.");
    }
    catch ( IOException ioe) {
      GenericTestUtils.assertExceptionContains("Injecting failure before edit rename",ioe);
    }
    Mockito.reset(faultInjector);
    for ( StorageDirectory sd : secondary.getFSImage().getStorage().dirIterable(NameNodeDirType.EDITS)) {
      File[] tmpEdits=sd.getCurrentDir().listFiles(tmpEditsFilter);
      // FIX: assertEquals gives an actual-vs-expected message on failure,
      // unlike assertTrue(x == 1).
      assertEquals("Expected a single tmp edits file in directory " + sd.toString(),1,tmpEdits.length);
      // Corrupt (truncate) the leftover tmp file; it must not break the
      // next checkpoint.
      RandomAccessFile randFile=new RandomAccessFile(tmpEdits[0],"rw");
      randFile.setLength(0);
      randFile.close();
    }
    secondary.doCheckpoint();
  }
  finally {
    if (secondary != null) {
      secondary.shutdown();
    }
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
    Mockito.reset(faultInjector);
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * Regression test for HDFS-3678 "Edit log files are never being purged from 2NN"
 *
 * With zero extra retained edits, repeated checkpoints must leave exactly
 * one edit-log file in each 2NN checkpoint directory.
 */
@Test public void testSecondaryPurgesEditLogs() throws IOException {
  MiniDFSCluster cluster=null;
  SecondaryNameNode secondary=null;
  Configuration conf=new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY,0);
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    FileSystem fs=cluster.getFileSystem();
    fs.mkdirs(new Path("/foo"));
    secondary=startSecondaryNameNode(conf);
    // Each checkpoint should purge the edit logs of the one before it.
    for (int i=0; i < 5; i++) {
      secondary.doCheckpoint();
    }
    // FIX: use parameterized types; iterating a raw List as File would not
    // compile.
    List<File> checkpointDirs=getCheckpointCurrentDirs(secondary);
    for ( File checkpointDir : checkpointDirs) {
      List<EditLogFile> editsFiles=FileJournalManager.matchEditLogs(checkpointDir);
      assertEquals("Edit log files were not purged from 2NN",1,editsFiles.size());
    }
  }
  finally {
    cleanup(secondary);
    secondary=null;
    cleanup(cluster);
    cluster=null;
  }
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Test that the primary NN will not serve any files to a 2NN who doesn't
 * share its namespace ID, and also will not accept any files from one.
 *
 * The 2NN side is mocked with an NNStorage whose storage info deliberately
 * mismatches the NN's; all three transfer directions must be rejected.
 */
@Test public void testNamespaceVerifiedOnFileTransfer() throws IOException {
  MiniDFSCluster cluster=null;
  Configuration conf=new HdfsConfiguration();
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    NamenodeProtocols nn=cluster.getNameNodeRpc();
    URL fsName=DFSUtil.getInfoServer(cluster.getNameNode().getServiceRpcAddress(),conf,DFSUtil.getHttpClientScheme(conf)).toURL();
    nn.rollEditLog();
    RemoteEditLogManifest manifest=nn.getEditLogManifest(1);
    RemoteEditLog log=manifest.getLogs().get(0);
    // Mock destination storage that reports a mismatched StorageInfo.
    NNStorage dstImage=Mockito.mock(NNStorage.class);
    Mockito.doReturn(Lists.newArrayList(new File("/wont-be-written"))).when(dstImage).getFiles(Mockito.anyObject(),Mockito.anyString());
    File mockImageFile=File.createTempFile("image","");
    // FIX: don't leak the temp file across test runs.
    mockImageFile.deleteOnExit();
    // FIX: try-with-resources so the stream is closed even if write() throws.
    try (FileOutputStream imageFile=new FileOutputStream(mockImageFile)) {
      imageFile.write("data".getBytes());
    }
    Mockito.doReturn(mockImageFile).when(dstImage).findImageFile(Mockito.any(NameNodeFile.class),Mockito.anyLong());
    Mockito.doReturn(new StorageInfo(1,1,"X",1,NodeType.NAME_NODE).toColonSeparatedString()).when(dstImage).toColonSeparatedString();
    // Downloading an image with mismatched storage info must fail.
    try {
      TransferFsImage.downloadImageToStorage(fsName,0,dstImage,false);
      fail("Storage info was not verified");
    }
    catch ( IOException ioe) {
      String msg=StringUtils.stringifyException(ioe);
      assertTrue(msg,msg.contains("but the secondary expected"));
    }
    // Downloading edits must fail the same way.
    try {
      TransferFsImage.downloadEditsToStorage(fsName,log,dstImage);
      fail("Storage info was not verified");
    }
    catch ( IOException ioe) {
      String msg=StringUtils.stringifyException(ioe);
      assertTrue(msg,msg.contains("but the secondary expected"));
    }
    // And the NN must refuse an uploaded image from the mismatched 2NN.
    try {
      TransferFsImage.uploadImageFromStorage(fsName,conf,dstImage,NameNodeFile.IMAGE,0);
      fail("Storage info was not verified");
    }
    catch ( IOException ioe) {
      String msg=StringUtils.stringifyException(ioe);
      assertTrue(msg,msg.contains("but the secondary expected"));
    }
  }
  finally {
    cleanup(cluster);
    cluster=null;
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests save namespace.
 *
 * Verifies that "-saveNamespace" is rejected outside safe mode, that it
 * finalizes the in-progress edit segment and writes an image in every
 * storage directory, and that files and symlinks survive a restart from
 * the saved image.
 */
@Test public void testSaveNamespace() throws IOException {
  MiniDFSCluster cluster=null;
  DistributedFileSystem fs=null;
  FileContext fc;
  try {
    Configuration conf=new HdfsConfiguration();
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
    cluster.waitActive();
    fs=(cluster.getFileSystem());
    fc=FileContext.getFileContext(cluster.getURI(0));
    DFSAdmin admin=new DFSAdmin(conf);
    String[] args=new String[]{"-saveNamespace"};
    // Outside safe mode, saveNamespace must be refused.
    try {
      admin.run(args);
    }
    catch ( IOException eIO) {
      assertTrue(eIO.getLocalizedMessage().contains("Safe mode should be turned ON"));
    }
    catch ( Exception e) {
      throw new IOException(e);
    }
    Path file=new Path("namespace.dat");
    DFSTestUtil.createFile(fs,file,fileSize,fileSize,blockSize,replication,seed);
    checkFile(fs,file,replication);
    Path symlink=new Path("file.link");
    fc.createSymlink(file,symlink,false);
    assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
    // FIX: use the parameterized type; iterating a raw Collection as URI
    // would not compile.
    Collection<URI> editsDirs=cluster.getNameEditsDirs(0);
    for ( URI uri : editsDirs) {
      File ed=new File(uri.getPath());
      assertTrue(new File(ed,"current/" + NNStorage.getInProgressEditsFileName(1)).length() > Integer.SIZE / Byte.SIZE);
    }
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      admin.run(args);
    }
    catch ( Exception e) {
      throw new IOException(e);
    }
    final int EXPECTED_TXNS_FIRST_SEG=13;
    // The first segment must now be finalized and a new in-progress segment
    // opened in every edits directory.
    for ( URI uri : editsDirs) {
      File ed=new File(uri.getPath());
      File curDir=new File(ed,"current");
      LOG.info("Files in " + curDir + ":\n "+ Joiner.on("\n ").join(curDir.list()));
      File originalEdits=new File(curDir,NNStorage.getInProgressEditsFileName(1));
      assertFalse(originalEdits.exists());
      File finalizedEdits=new File(curDir,NNStorage.getFinalizedEditsFileName(1,EXPECTED_TXNS_FIRST_SEG));
      GenericTestUtils.assertExists(finalizedEdits);
      assertTrue(finalizedEdits.length() > Integer.SIZE / Byte.SIZE);
      GenericTestUtils.assertExists(new File(ed,"current/" + NNStorage.getInProgressEditsFileName(EXPECTED_TXNS_FIRST_SEG + 1)));
    }
    // Every image directory must contain the newly saved image.
    Collection<URI> imageDirs=cluster.getNameDirs(0);
    for ( URI uri : imageDirs) {
      File imageDir=new File(uri.getPath());
      File savedImage=new File(imageDir,"current/" + NNStorage.getImageFileName(EXPECTED_TXNS_FIRST_SEG));
      assertTrue("Should have saved image at " + savedImage,savedImage.exists());
    }
    // Restart from the saved image; file and symlink must still exist.
    cluster.shutdown();
    cluster=null;
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    fs=(cluster.getFileSystem());
    checkFile(fs,file,replication);
    fc=FileContext.getFileContext(cluster.getURI(0));
    assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
  }
  finally {
    if (fs != null) fs.close();
    cleanup(cluster);
    cluster=null;
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Regression test for HDFS-3849. This makes sure that when we re-load the
 * FSImage in the 2NN, we clear the existing leases.
 */
@Test public void testSecondaryNameNodeWithSavedLeases() throws IOException {
  MiniDFSCluster dfsCluster=null;
  SecondaryNameNode checkpointer=null;
  FSDataOutputStream openFile=null;
  Configuration config=new HdfsConfiguration();
  try {
    dfsCluster=new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).format(true).build();
    FileSystem fileSystem=dfsCluster.getFileSystem();
    // Keep a file open so the NN holds exactly one lease.
    openFile=fileSystem.create(new Path("tmpfile"));
    openFile.write(new byte[]{0,1,2,3});
    openFile.hflush();
    assertEquals(1,dfsCluster.getNamesystem().getLeaseManager().countLease());
    // A fresh 2NN starts with no leases; its first checkpoint picks up the
    // open-file lease from the NN.
    checkpointer=startSecondaryNameNode(config);
    assertEquals(0,checkpointer.getFSNamesystem().getLeaseManager().countLease());
    checkpointer.doCheckpoint();
    assertEquals(1,checkpointer.getFSNamesystem().getLeaseManager().countLease());
    // Close the file and save a namespace image that contains no leases.
    openFile.close();
    openFile=null;
    dfsCluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER,false);
    dfsCluster.getNameNodeRpc().saveNamespace();
    dfsCluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE,false);
    // Re-loading that image in the 2NN must clear its stale lease.
    checkpointer.doCheckpoint();
    assertEquals(0,checkpointer.getFSNamesystem().getLeaseManager().countLease());
  }
  finally {
    if (openFile != null) {
      openFile.close();
    }
    cleanup(checkpointer);
    checkpointer=null;
    cleanup(dfsCluster);
    dfsCluster=null;
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that the 2NN writes a legacy OIV image per checkpoint and
 * retains only the configured number (2) of them, replacing the oldest.
 */
@Test public void testLegacyOivImage() throws Exception {
  MiniDFSCluster cluster=null;
  SecondaryNameNode secondary=null;
  File tmpDir=Files.createTempDir();
  Configuration conf=new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY,tmpDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY,"2");
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    secondary=startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    // FIX: Java-style array declaration instead of C-style "String files1[]".
    String[] files1=tmpDir.list();
    assertEquals("Only one file is expected",1,files1.length);
    // Two more checkpoints: retention of 2 means the first image is evicted.
    secondary.doCheckpoint();
    secondary.doCheckpoint();
    String[] files2=tmpDir.list();
    assertEquals("Two files are expected",2,files2.length);
    for ( String fName : files2) {
      assertFalse(fName.equals(files1[0]));
    }
  }
  finally {
    cleanup(secondary);
    cleanup(cluster);
    // FIX: File.delete() silently fails on a non-empty directory, leaking
    // the OIV images; remove the contents first.
    File[] leftovers=tmpDir.listFiles();
    if (leftovers != null) {
      for ( File f : leftovers) {
        f.delete();
      }
    }
    tmpDir.delete();
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test that the secondary namenode correctly deletes temporary edits
 * on startup.
 *
 * A fault-injected checkpoint leaves a .tmp edits file behind; after a 2NN
 * restart that leftover must be gone and checkpointing must work again.
 */
@Test(timeout=60000) public void testDeleteTemporaryEditsOnStartup() throws IOException {
  Configuration conf=new HdfsConfiguration();
  SecondaryNameNode secondary=null;
  MiniDFSCluster cluster=null;
  FileSystem fs=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fs=cluster.getFileSystem();
    secondary=startSecondaryNameNode(conf);
    // FIX: uppercase 'L' literal suffix (lowercase 'l' reads as '1').
    DFSTestUtil.createFile(fs,new Path("tmpfile0"),1024,(short)1,0L);
    secondary.doCheckpoint();
    // Fail the next checkpoint just before the downloaded edits are renamed
    // into place, stranding a .tmp file.
    Mockito.doThrow(new IOException("Injecting failure before edit rename")).when(faultInjector).beforeEditsRename();
    DFSTestUtil.createFile(fs,new Path("tmpfile1"),1024,(short)1,0L);
    try {
      secondary.doCheckpoint();
      fail("Fault injection failed.");
    }
    catch ( IOException ioe) {
      GenericTestUtils.assertExceptionContains("Injecting failure before edit rename",ioe);
    }
    Mockito.reset(faultInjector);
    for ( StorageDirectory sd : secondary.getFSImage().getStorage().dirIterable(NameNodeDirType.EDITS)) {
      File[] tmpEdits=sd.getCurrentDir().listFiles(tmpEditsFilter);
      // FIX: assertEquals gives an actual-vs-expected message on failure,
      // unlike assertTrue(x == 1).
      assertEquals("Expected a single tmp edits file in directory " + sd.toString(),1,tmpEdits.length);
    }
    // Restarting the 2NN must clean up the stranded tmp edits files.
    secondary.shutdown();
    secondary=startSecondaryNameNode(conf);
    for ( StorageDirectory sd : secondary.getFSImage().getStorage().dirIterable(NameNodeDirType.EDITS)) {
      File[] tmpEdits=sd.getCurrentDir().listFiles(tmpEditsFilter);
      assertEquals("Did not expect a tmp edits file in directory " + sd.toString(),0,tmpEdits.length);
    }
    secondary.doCheckpoint();
  }
  finally {
    if (secondary != null) {
      secondary.shutdown();
    }
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
    Mockito.reset(faultInjector);
  }
}
APIUtilityVerifier BranchVerifier UtilityVerifier EqualityVerifier HybridVerifier
/**
 * Test namenode format with -format -force -clusterid option when name
 * directory exists. Format should succeed.
 * @throws IOException
 */
@Test public void testFormatWithForceAndClusterId() throws IOException {
  // Pre-create the name directory so the format has something to overwrite.
  if (!hdfsDir.mkdirs()) {
    fail("Failed to create dir " + hdfsDir.getPath());
  }
  final String requestedClusterId="testFormatWithForceAndClusterId";
  final String[] formatArgs={"-format","-force","-clusterid",requestedClusterId};
  try {
    NameNode.createNameNode(formatArgs,config);
    fail("createNameNode() did not call System.exit()");
  }
  catch ( ExitException e) {
    // Exit status 0 signals a successful format.
    assertEquals("Format should have succeeded",0,e.status);
  }
  // The formatted namespace must carry exactly the cluster ID we requested.
  assertEquals("ClusterIds do not match",requestedClusterId,getClusterId(config));
}
APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test namenode format with -format -force options when name directory
 * exists. Format should succeed.
 * @throws IOException
 */
@Test public void testFormatWithForce() throws IOException {
  // Pre-create the name directory so the format has something to overwrite.
  if (!hdfsDir.mkdirs()) {
    fail("Failed to create dir " + hdfsDir.getPath());
  }
  String[] argv={"-format","-force"};
  try {
    NameNode.createNameNode(argv,config);
    fail("createNameNode() did not call System.exit()");
  }
  catch ( ExitException e) {
    // Exit status 0 signals a successful format.
    assertEquals("Format should have succeeded",0,e.status);
  }
  String cid=getClusterId(config);
  // FIX: isEmpty() instead of equals("").
  assertTrue("Didn't get new ClusterId",cid != null && !cid.isEmpty());
}
APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test namenode format with -format -nonInteractive -force options when the
 * name directory already exists. Format should succeed.
 * @throws IOException
 */
@Test public void testFormatWithNonInteractiveAndForce() throws IOException {
  // Pre-create the name directory so the non-interactive/force path is hit.
  if (!hdfsDir.mkdirs()) {
    fail("Failed to create dir " + hdfsDir.getPath());
  }
  try {
    // System.exit() inside createNameNode() surfaces as an ExitException.
    NameNode.createNameNode(new String[]{"-format", "-nonInteractive", "-force"}, config);
    fail("createNameNode() did not call System.exit()");
  } catch (ExitException e) {
    assertEquals("Format should have succeeded", 0, e.status);
  }
  final String cid = getClusterId(config);
  // A fresh format must generate a non-empty cluster id.
  assertTrue("Didn't get new ClusterId", cid != null && !cid.isEmpty());
}
APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test namenode format with the plain -format option when an empty name
 * directory already exists. Format should succeed without prompting.
 * @throws IOException
 */
@Test public void testFormatWithEmptyDir() throws IOException {
  // An empty (but existing) name directory should not require confirmation.
  if (!hdfsDir.mkdirs()) {
    fail("Failed to create dir " + hdfsDir.getPath());
  }
  try {
    // System.exit() inside createNameNode() surfaces as an ExitException.
    NameNode.createNameNode(new String[]{"-format"}, config);
    fail("createNameNode() did not call System.exit()");
  } catch (ExitException e) {
    assertEquals("Format should have succeeded", 0, e.status);
  }
  final String cid = getClusterId(config);
  // A fresh format must generate a non-empty cluster id.
  assertTrue("Didn't get new ClusterId", cid != null && !cid.isEmpty());
}
APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test namenode format with -format -nonInteractive options when the name
 * directory does not exist. Format should succeed.
 * (The trailing "Exit" in the method name is a historical typo for "Exist";
 * it is kept to preserve the test's public identity.)
 * @throws IOException
 */
@Test public void testFormatWithNonInteractiveNameDirDoesNotExit() throws IOException {
  try {
    // System.exit() inside createNameNode() surfaces as an ExitException.
    NameNode.createNameNode(new String[]{"-format", "-nonInteractive"}, config);
    fail("createNameNode() did not call System.exit()");
  } catch (ExitException e) {
    assertEquals("Format should have succeeded", 0, e.status);
  }
  final String cid = getClusterId(config);
  // A fresh format must generate a non-empty cluster id.
  assertTrue("Didn't get new ClusterId", cid != null && !cid.isEmpty());
}
APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test namenode format with the -format option when a non-empty name
 * directory exists. "Y" is fed to the confirmation prompt, so the format
 * should succeed.
 * @throws IOException
 * @throws InterruptedException
 */
@Test public void testFormatWithoutForceEnterYes() throws IOException, InterruptedException {
  // Put something inside the name dir so format must prompt for confirmation.
  File data = new File(hdfsDir, "file");
  if (!data.mkdirs()) {
    fail("Failed to create dir " + data.getPath());
  }
  // Redirect stdin so the interactive prompt reads "Y".
  InputStream origIn = System.in;
  System.setIn(new ByteArrayInputStream("Y\n".getBytes()));
  try {
    String[] argv = {"-format"};
    try {
      // System.exit() inside createNameNode() surfaces as an ExitException.
      NameNode.createNameNode(argv, config);
      fail("createNameNode() did not call System.exit()");
    } catch (ExitException e) {
      assertEquals("Format should have succeeded", 0, e.status);
    }
  } finally {
    // BUGFIX: restore stdin even if fail()/assertEquals() above throws;
    // otherwise later tests in this JVM inherit the redirected stream.
    System.setIn(origIn);
  }
  String cid = getClusterId(config);
  // A fresh format must generate a non-empty cluster id.
  assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
}
APIUtilityVerifier BooleanVerifier
/**
 * Exercises the -clusterid handling of NameNode.format():
 * no id set => generate one; explicit id => use it verbatim;
 * empty id => generate a fresh one.
 */
@Test public void testFormatClusterIdOption() throws IOException {
  // First format: no explicit cluster id, so one must be auto-generated.
  NameNode.format(config);
  String cid = getClusterId(config);
  assertTrue("Didn't get new ClusterId", cid != null && !cid.equals(""));
  // Second format: an explicit cluster id must be honored verbatim.
  StartupOption.FORMAT.setClusterId("mycluster");
  NameNode.format(config);
  cid = getClusterId(config);
  assertTrue("ClusterId didn't match", cid.equals("mycluster"));
  // Third format: an empty id means "generate a fresh one" again.
  StartupOption.FORMAT.setClusterId("");
  NameNode.format(config);
  assertFalse("ClusterId should not be the same", getClusterId(config).equals(cid));
}
APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test namenode format with the bare -format option. Format should succeed.
 * @throws IOException
 */
@Test public void testFormat() throws IOException {
  try {
    // System.exit() inside createNameNode() surfaces as an ExitException.
    NameNode.createNameNode(new String[]{"-format"}, config);
    fail("createNameNode() did not call System.exit()");
  } catch (ExitException e) {
    assertEquals("Format should have succeeded", 0, e.status);
  }
  final String cid = getClusterId(config);
  // A fresh format must generate a non-empty cluster id.
  assertTrue("Didn't get new ClusterId", cid != null && !cid.isEmpty());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Test to ensure the namenode rejects requests from a dead datanode:
 * - Start a cluster
 * - Shutdown the datanode and wait for it to be marked dead at the namenode
 * - Send datanode requests to the namenode and make sure they are rejected
 *   appropriately.
 */
@Test public void testDeadDatanode() throws Exception {
// Aggressive heartbeat/recheck intervals so the shut-down datanode is
// declared dead well within the 20s wait windows below.
Configuration conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,500);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1L);
cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
String poolId=cluster.getNamesystem().getBlockPoolId();
DataNode dn=cluster.getDataNodes().get(0);
DatanodeRegistration reg=DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0),poolId);
// Wait until the node is seen as live, kill it, then wait until it is dead.
waitForDatanodeState(reg.getDatanodeUuid(),true,20000);
dn.shutdown();
waitForDatanodeState(reg.getDatanodeUuid(),false,20000);
DatanodeProtocol dnp=cluster.getNameNodeRpc();
// A blockReceivedAndDeleted RPC from the dead datanode must be rejected.
ReceivedDeletedBlockInfo[] blocks={new ReceivedDeletedBlockInfo(new Block(0),ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,null)};
StorageReceivedDeletedBlocks[] storageBlocks={new StorageReceivedDeletedBlocks(reg.getDatanodeUuid(),blocks)};
try {
dnp.blockReceivedAndDeleted(reg,poolId,storageBlocks);
fail("Expected IOException is not thrown");
}
catch ( IOException ex) {
// expected: the namenode refuses requests from a dead datanode
}
// A block report from the dead datanode must likewise be rejected.
StorageBlockReport[] report={new StorageBlockReport(new DatanodeStorage(reg.getDatanodeUuid()),new long[]{0L,0L,0L})};
try {
dnp.blockReport(reg,poolId,report);
fail("Expected IOException is not thrown");
}
catch ( IOException ex) {
// expected: the namenode refuses requests from a dead datanode
}
// A heartbeat from the dead datanode should instead receive exactly one
// command telling it to re-register.
StorageReport[] rep={new StorageReport(new DatanodeStorage(reg.getDatanodeUuid()),false,0,0,0,0)};
DatanodeCommand[] cmd=dnp.sendHeartbeat(reg,rep,0L,0L,0,0,0).getCommands();
assertEquals(1,cmd.length);
assertEquals(cmd[0].getAction(),RegisterCommand.REGISTER.getAction());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test loading an editlog which has had both its storage dirs fail
 * on alternating rolls. Two edit log directories are created.
 * The first one fails on odd rolls, the second on even. Test
 * that we are able to load the entire editlog regardless.
 */
@Test public void testAlternatingJournalFailure() throws IOException {
File f1=new File(TEST_DIR + "/alternatingjournaltest0");
File f2=new File(TEST_DIR + "/alternatingjournaltest1");
List editUris=ImmutableList.of(f1.toURI(),f2.toURI());
// 10 rolls; AbortSpec(roll, dirIdx) aborts dir 0 on odd rolls and dir 1 on
// even rolls, so every segment survives in at least one directory.
NNStorage storage=setupEdits(editUris,10,new AbortSpec(1,0),new AbortSpec(2,1),new AbortSpec(3,0),new AbortSpec(4,1),new AbortSpec(5,0),new AbortSpec(6,1),new AbortSpec(7,0),new AbortSpec(8,1),new AbortSpec(9,0),new AbortSpec(10,1));
long totaltxnread=0;
FSEditLog editlog=getFSEditLog(storage);
editlog.initJournalsForWrite();
long startTxId=1;
Iterable editStreams=editlog.selectInputStreams(startTxId,TXNS_PER_ROLL * 11);
for ( EditLogInputStream edits : editStreams) {
// Validate each selected stream and check the streams are contiguous:
// each must begin exactly where the previous one ended.
FSEditLogLoader.EditLogValidation val=FSEditLogLoader.validateEditLog(edits);
long read=(val.getEndTxId() - edits.getFirstTxId()) + 1;
LOG.info("Loading edits " + edits + " read "+ read);
assertEquals(startTxId,edits.getFirstTxId());
startTxId+=read;
totaltxnread+=read;
}
editlog.close();
storage.close();
// 10 rolls plus the initial segment => 11 segments of TXNS_PER_ROLL txns.
assertEquals(TXNS_PER_ROLL * 11,totaltxnread);
}
APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test loading an editlog with gaps. A single editlog directory
 * is set up. One of the edit log files is deleted. This should
 * fail when selecting the input streams as it will not be able
 * to select enough streams to load up to 4*TXNS_PER_ROLL.
 * There should be 4*TXNS_PER_ROLL transactions as we rolled 3
 * times.
 */
@Test public void testLoadingWithGaps() throws IOException {
File f1=new File(TEST_DIR + "/gaptest0");
List editUris=ImmutableList.of(f1.toURI());
// Three rolls => four finalized segments of TXNS_PER_ROLL txns each.
NNStorage storage=setupEdits(editUris,3);
// Delete the second segment [TXNS_PER_ROLL+1, 2*TXNS_PER_ROLL] to open a gap.
final long startGapTxId=1 * TXNS_PER_ROLL + 1;
final long endGapTxId=2 * TXNS_PER_ROLL;
File[] files=new File(f1,"current").listFiles(new FilenameFilter(){
@Override public boolean accept( File dir, String name){
// Match only the finalized edits file covering the gap range.
if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId,endGapTxId))) {
return true;
}
return false;
}
}
);
assertEquals(1,files.length);
assertTrue(files[0].delete());
FSEditLog editlog=getFSEditLog(storage);
editlog.initJournalsForWrite();
long startTxId=1;
try {
// With a segment missing, stream selection cannot cover [1, 4*TXNS_PER_ROLL]
// and must report the gap.
editlog.selectInputStreams(startTxId,4 * TXNS_PER_ROLL);
fail("Should have thrown exception");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Gap in transactions. Expected to be able to read up until " + "at least txid 40 but unable to find any edit logs containing " + "txid 11",ioe);
}
}
APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test edit log failover. If a single edit log file is missing from one
 * directory, the copy in the other edits directory should be used instead.
 */
@Test public void testEditLogFailOverFromMissing() throws IOException {
  File f1 = new File(TEST_DIR + "/failover0");
  File f2 = new File(TEST_DIR + "/failover1");
  List editUris = ImmutableList.of(f1.toURI(), f2.toURI());
  NNStorage storage = setupEdits(editUris, 3);
  // Remove the segment [TXNS_PER_ROLL+1, 2*TXNS_PER_ROLL] from the first
  // directory only; the second directory still has it.
  final long startErrorTxId = 1 * TXNS_PER_ROLL + 1;
  final long endErrorTxId = 2 * TXNS_PER_ROLL;
  File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
    @Override public boolean accept(File dir, String name) {
      // Match only the finalized edits file covering the error range.
      return name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId, endErrorTxId));
    }
  });
  assertEquals(1, files.length);
  assertTrue(files[0].delete());
  FSEditLog editlog = getFSEditLog(storage);
  editlog.initJournalsForWrite();
  long startTxId = 1;
  Collection streams = null;
  try {
    // Despite the missing file, selection + reading must succeed via f2.
    streams = editlog.selectInputStreams(startTxId, 4 * TXNS_PER_ROLL);
    readAllEdits(streams, startTxId);
  } catch (IOException e) {
    LOG.error("edit log failover didn't work", e);
    fail("Edit log failover didn't work");
  } finally {
    // BUGFIX: if selectInputStreams() itself threw, streams is still null
    // here; the unconditional toArray() would NPE and mask the real failure.
    if (streams != null) {
      IOUtils.cleanup(null, streams.toArray(new EditLogInputStream[0]));
    }
  }
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
/**
 * Test edit log failover from a corrupt edit log: if one directory's copy of
 * a segment is corrupted, the other directory's copy should be used instead.
 */
@Test public void testEditLogFailOverFromCorrupt() throws IOException {
  File f1 = new File(TEST_DIR + "/failover0");
  File f2 = new File(TEST_DIR + "/failover1");
  List editUris = ImmutableList.of(f1.toURI(), f2.toURI());
  NNStorage storage = setupEdits(editUris, 3);
  // Corrupt the segment [TXNS_PER_ROLL+1, 2*TXNS_PER_ROLL] in the first
  // directory only; the second directory keeps a clean copy.
  final long startErrorTxId = 1 * TXNS_PER_ROLL + 1;
  final long endErrorTxId = 2 * TXNS_PER_ROLL;
  File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
    @Override public boolean accept(File dir, String name) {
      // Match only the finalized edits file covering the error range.
      return name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId, endErrorTxId));
    }
  });
  assertEquals(1, files.length);
  long fileLen = files[0].length();
  LOG.debug("Corrupting Log File: " + files[0] + " len: " + fileLen);
  // Flip the last 4-byte word of the file (read it, write value+1 back).
  RandomAccessFile rwf = new RandomAccessFile(files[0], "rw");
  try {
    rwf.seek(fileLen - 4);
    int b = rwf.readInt();
    rwf.seek(fileLen - 4);
    rwf.writeInt(b + 1);
  } finally {
    // BUGFIX: close the file even if a seek/read/write throws, so the
    // handle is not leaked on an I/O failure.
    rwf.close();
  }
  FSEditLog editlog = getFSEditLog(storage);
  editlog.initJournalsForWrite();
  long startTxId = 1;
  Collection streams = null;
  try {
    // Despite the corruption in f1, selection + reading must succeed via f2.
    streams = editlog.selectInputStreams(startTxId, 4 * TXNS_PER_ROLL);
    readAllEdits(streams, startTxId);
  } catch (IOException e) {
    LOG.error("edit log failover didn't work", e);
    fail("Edit log failover didn't work");
  } finally {
    // BUGFIX: if selectInputStreams() itself threw, streams is still null
    // here; the unconditional toArray() would NPE and mask the real failure.
    if (streams != null) {
      IOUtils.cleanup(null, streams.toArray(new EditLogInputStream[0]));
    }
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * Test case for an empty edit log from a prior (pre-transaction-ID) version
 * of Hadoop: loading it should yield zero edits.
 */
@Test public void testPreTxIdEditLogNoEdits() throws Exception {
  // Mock just enough of the namesystem for the loader to run.
  FSNamesystem namesys = Mockito.mock(FSNamesystem.class);
  namesys.dir = Mockito.mock(FSDirectory.class);
  // "ffffffed" is the raw header of an empty legacy log — no edit records.
  long numEdits = testLoad(StringUtils.hexStringToByte("ffffffed"), namesys);
  assertEquals(0, numEdits);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test case for loading a very simple edit log in the format used before
 * edit transaction IDs were included in the log.
 */
@Test public void testPreTxidEditLogWithEdits() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNamesystem();
    // The canned legacy log contains exactly three transactions.
    long numEdits = testLoad(HADOOP20_SOME_EDITS, namesystem);
    assertEquals(3, numEdits);
    // One of those transactions created /myfile; verify its metadata.
    HdfsFileStatus fileInfo = namesystem.getFileInfo("/myfile", false);
    assertEquals("supergroup", fileInfo.getGroup());
    assertEquals(3, fileInfo.getReplication());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests the getEditLogManifest function using mock storage for a number
 * of different situations. Segment strings are "[start,end]" lists per
 * journal; "[n,]" denotes an in-progress segment.
 */
@Test public void testEditLogManifestMocks() throws IOException {
NNStorage storage;
FSEditLog log;
// Case 1: two journals in sync; the in-progress tail segment [201,] is
// excluded from the manifest.
storage=mockStorageWithEdits("[1,100]|[101,200]|[201,]","[1,100]|[101,200]|[201,]");
log=getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals("[[1,100], [101,200]]",log.getEditLogManifest(1).toString());
assertEquals("[[101,200]]",log.getEditLogManifest(101).toString());
// Case 2: journals holding different segments are merged into one
// contiguous manifest.
storage=mockStorageWithEdits("[1,100]|[101,200]","[1,100]|[201,300]|[301,400]");
log=getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals("[[1,100], [101,200], [201,300], [301,400]]",log.getEditLogManifest(1).toString());
// Case 3: a gap after [1,100] limits the manifest to the contiguous tail
// [301,400], [401,500].
storage=mockStorageWithEdits("[1,100]|[301,400]","[301,400]|[401,500]");
log=getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals("[[301,400], [401,500]]",log.getEditLogManifest(1).toString());
// Case 4: overlapping segments; the longer-covering ones win.
storage=mockStorageWithEdits("[1,100]|[101,150]","[1,50]|[101,200]");
log=getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals("[[1,100], [101,200]]",log.getEditLogManifest(1).toString());
assertEquals("[[101,200]]",log.getEditLogManifest(101).toString());
// Case 5: an in-progress segment [101,] in one journal is superseded by
// the finalized [101,200] in the other.
storage=mockStorageWithEdits("[1,100]|[101,]","[1,100]|[101,200]");
log=getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals("[[1,100], [101,200]]",log.getEditLogManifest(1).toString());
assertEquals("[[101,200]]",log.getEditLogManifest(101).toString());
}
APIUtilityVerifier UtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Corrupts the tail of every finalized edits file and verifies that a
 * subsequent (non-formatting) restart fails with a ChecksumException cause.
 */
@Test public void testEditChecksum() throws Exception {
// Start a cluster, perform one namespace edit (mkdir /tmp), and remember
// every EDITS storage directory so the log files can be corrupted later.
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
FileSystem fileSys=null;
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
FSImage fsimage=namesystem.getFSImage();
final FSEditLog editLog=fsimage.getEditLog();
fileSys.mkdirs(new Path("/tmp"));
Iterator iter=fsimage.getStorage().dirIterator(NameNodeDirType.EDITS);
LinkedList sds=new LinkedList();
while (iter.hasNext()) {
sds.add(iter.next());
}
// Shut everything down so the finalized edits files [1,3] are on disk.
editLog.close();
cluster.shutdown();
for ( StorageDirectory sd : sds) {
File editFile=NNStorage.getFinalizedEditsFile(sd,1,3);
assertTrue(editFile.exists());
long fileLen=editFile.length();
LOG.debug("Corrupting Log File: " + editFile + " len: "+ fileLen);
// Flip the trailing 4-byte word of each file (read it, write value+1).
RandomAccessFile rwf=new RandomAccessFile(editFile,"rw");
rwf.seek(fileLen - 4);
int b=rwf.readInt();
rwf.seek(fileLen - 4);
rwf.writeInt(b + 1);
rwf.close();
}
try {
// Restarting without re-formatting must fail while replaying the
// corrupted edit logs.
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).build();
fail("should not be able to start");
}
catch ( IOException e) {
assertNotNull("Cause of exception should be ChecksumException",e.getCause());
assertEquals("Cause of exception should be ChecksumException",ChecksumException.class,e.getCause().getClass());
}
}
APIUtilityVerifier EqualityVerifier ConditionMatcher HybridVerifier
/**
 * Reads an edit log over a mocked HTTP connection and verifies that the
 * expected op codes and reported stream length come through intact.
 */
@Test public void testReadURL() throws Exception {
// Mock an HTTP connection serving FAKE_LOG_DATA with a 200 response and a
// matching Content-Length header.
HttpURLConnection conn=mock(HttpURLConnection.class);
doReturn(new ByteArrayInputStream(FAKE_LOG_DATA)).when(conn).getInputStream();
doReturn(HttpURLConnection.HTTP_OK).when(conn).getResponseCode();
doReturn(Integer.toString(FAKE_LOG_DATA.length)).when(conn).getHeaderField("Content-Length");
// The factory hands the mocked connection to the edit log stream.
URLConnectionFactory factory=mock(URLConnectionFactory.class);
doReturn(conn).when(factory).openConnection(Mockito.any(),anyBoolean());
URL url=new URL("http://localhost/fakeLog");
EditLogInputStream elis=EditLogFileInputStream.fromUrl(factory,url,HdfsConstants.INVALID_TXID,HdfsConstants.INVALID_TXID,false);
// Count op types in the streamed log: the fake data holds exactly one
// ADD, one SET_GENSTAMP_V1 and one CLOSE op.
EnumMap> counts=FSImageTestUtil.countEditLogOpTypes(elis);
assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held,is(1));
assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held,is(1));
assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held,is(1));
// The stream must report the full Content-Length as its length.
assertEquals(FAKE_LOG_DATA.length,elis.length());
elis.close();
}
APIUtilityVerifier BooleanVerifier
/**
 * Tests that EditLogFileOutputStream does not throw NullPointerException on
 * a close/close sequence. See HDFS-2011.
 */
@Test public void testEditLogFileOutputStreamCloseClose() throws IOException {
  EditLogFileOutputStream stream = new EditLogFileOutputStream(conf, TEST_EDITS, 0);
  stream.close();
  try {
    // A second close may legitimately throw IOException — but never NPE.
    stream.close();
  } catch (IOException ioe) {
    String msg = StringUtils.stringifyException(ioe);
    assertTrue(msg, msg.contains("Trying to use aborted output stream"));
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Tests rolling edit logs while transactions are ongoing.
 */
@Test public void testEditLogRolling() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
FileSystem fileSys=null;
// Any error thrown by a background transaction worker is stashed here and
// rethrown after the workers are stopped.
AtomicReference caughtErr=new AtomicReference();
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
FSImage fsimage=namesystem.getFSImage();
StorageDirectory sd=fsimage.getStorage().getStorageDir(0);
// Keep edits flowing from worker threads while we roll repeatedly.
startTransactionWorkers(namesystem,caughtErr);
long previousLogTxId=1;
for (int i=0; i < NUM_ROLLS && caughtErr.get() == null; i++) {
try {
Thread.sleep(20);
}
catch ( InterruptedException e) {
// ignored: the sleep only paces the rolls
}
LOG.info("Starting roll " + i + ".");
CheckpointSignature sig=namesystem.rollEditLog();
long nextLog=sig.curSegmentTxId;
// Verify the just-finalized segment, then confirm that our running txid
// count matches the segment boundary reported by the roll.
String logFileName=NNStorage.getFinalizedEditsFileName(previousLogTxId,nextLog - 1);
previousLogTxId+=verifyEditLogs(namesystem,fsimage,logFileName,previousLogTxId);
assertEquals(previousLogTxId,nextLog);
// The new in-progress segment must exist on disk after the roll.
File expectedLog=NNStorage.getInProgressEditsFile(sd,previousLogTxId);
assertTrue("Expect " + expectedLog + " to exist",expectedLog.exists());
}
}
finally {
stopTransactionWorkers();
if (caughtErr.get() != null) {
throw new RuntimeException(caughtErr.get());
}
if (fileSys != null) fileSys.close();
if (cluster != null) cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * The logSync() method in FSEditLog is unsynchronized while syncing
 * so that other threads can concurrently enqueue edits while the prior
 * sync is ongoing. This test checks that the log is saved correctly
 * if the saveImage occurs while the syncing thread is in the unsynchronized middle section.
 * This replicates the following manual test proposed by Konstantin:
 * I start the name-node in debugger.
 * I do -mkdir and stop the debugger in logSync() just before it does flush.
 * Then I enter safe mode with another client
 * I start saveNamespace and stop the debugger in
 * FSImage.saveFSImage() -> FSEditLog.createEditLogFile()
 * -> EditLogFileOutputStream.create() ->
 * after truncating the file but before writing LAYOUT_VERSION into it.
 * Then I let logSync() run.
 * Then I terminate the name-node.
 * After that the name-node won't start, since the edits file is broken.
 */
@Test public void testSaveImageWhileSyncInProgress() throws Exception {
Configuration conf=getConf();
NameNode.initMetrics(conf,NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
final FSNamesystem namesystem=FSNamesystem.loadFromDisk(conf);
try {
FSImage fsimage=namesystem.getFSImage();
FSEditLog editLog=fsimage.getEditLog();
// Replace the active output stream with a spy so flush() can be stalled.
JournalAndStream jas=editLog.getJournals().get(0);
EditLogFileOutputStream spyElos=spy((EditLogFileOutputStream)jas.getCurrentStream());
jas.setCurrentStreamForTests(spyElos);
final AtomicReference deferredException=new AtomicReference();
final CountDownLatch waitToEnterFlush=new CountDownLatch(1);
// Background thread issues a mkdirs, which will block inside flush().
final Thread doAnEditThread=new Thread(){
@Override public void run(){
try {
LOG.info("Starting mkdirs");
namesystem.mkdirs("/test",new PermissionStatus("test","test",new FsPermission((short)00755)),true);
LOG.info("mkdirs complete");
}
catch ( Throwable ioe) {
// Remember the failure and release the main thread so it doesn't hang.
LOG.fatal("Got exception",ioe);
deferredException.set(ioe);
waitToEnterFlush.countDown();
}
}
}
;
// Answer that stalls the edit thread inside flush() for BLOCK_TIME seconds,
// keeping logSync() in its unsynchronized middle section.
Answer blockingFlush=new Answer(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
LOG.info("Flush called");
if (Thread.currentThread() == doAnEditThread) {
LOG.info("edit thread: Telling main thread we made it to flush section...");
waitToEnterFlush.countDown();
LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");
Thread.sleep(BLOCK_TIME * 1000);
LOG.info("Going through to flush. This will allow the main thread to continue.");
}
invocation.callRealMethod();
LOG.info("Flush complete");
return null;
}
}
;
doAnswer(blockingFlush).when(spyElos).flush();
doAnEditThread.start();
LOG.info("Main thread: waiting to enter flush...");
waitToEnterFlush.await();
assertNull(deferredException.get());
LOG.info("Main thread: detected that logSync is in unsynchronized section.");
LOG.info("Trying to enter safe mode.");
LOG.info("This should block for " + BLOCK_TIME + "sec, since flush will sleep that long");
long st=Time.now();
namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
long et=Time.now();
LOG.info("Entered safe mode");
// Safe-mode entry must have waited for the in-flight sync to complete.
assertTrue(et - st > (BLOCK_TIME - 1) * 1000);
namesystem.saveNamespace();
LOG.info("Joining on edit thread...");
doAnEditThread.join();
assertNull(deferredException.get());
// Verify the finalized segment [1,3] and the new in-progress segment at 4
// both replay cleanly — i.e. the save did not corrupt the edit log.
assertEquals(3,verifyEditLogs(namesystem,fsimage,NNStorage.getFinalizedEditsFileName(1,3),1));
assertEquals(1,verifyEditLogs(namesystem,fsimage,NNStorage.getInProgressEditsFileName(4),4));
}
finally {
LOG.info("Closing namesystem");
if (namesystem != null) namesystem.close();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Most of the FSNamesystem methods have a synchronized section where they
 * update the name system itself and write to the edit log, and then
 * unsynchronized, they call logSync. This test verifies that, if an
 * operation has written to the edit log but not yet synced it,
 * we wait for that sync before entering safe mode.
 */
@Test public void testSaveRightBeforeSync() throws Exception {
Configuration conf=getConf();
NameNode.initMetrics(conf,NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
final FSNamesystem namesystem=FSNamesystem.loadFromDisk(conf);
try {
FSImage fsimage=namesystem.getFSImage();
// Spy on the edit log so logSync() can be intercepted and stalled.
FSEditLog editLog=spy(fsimage.getEditLog());
fsimage.editLog=editLog;
final AtomicReference deferredException=new AtomicReference();
final CountDownLatch waitToEnterSync=new CountDownLatch(1);
// Background thread issues a mkdirs, which will stall just before logSync.
final Thread doAnEditThread=new Thread(){
@Override public void run(){
try {
LOG.info("Starting mkdirs");
namesystem.mkdirs("/test",new PermissionStatus("test","test",new FsPermission((short)00755)),true);
LOG.info("mkdirs complete");
}
catch ( Throwable ioe) {
// Remember the failure and release the main thread so it doesn't hang.
LOG.fatal("Got exception",ioe);
deferredException.set(ioe);
waitToEnterSync.countDown();
}
}
}
;
// Answer that delays the edit thread for BLOCK_TIME seconds just before
// the real logSync() runs, leaving an edit written but unsynced.
Answer blockingSync=new Answer(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
LOG.info("logSync called");
if (Thread.currentThread() == doAnEditThread) {
LOG.info("edit thread: Telling main thread we made it just before logSync...");
waitToEnterSync.countDown();
LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");
Thread.sleep(BLOCK_TIME * 1000);
LOG.info("Going through to logSync. This will allow the main thread to continue.");
}
invocation.callRealMethod();
LOG.info("logSync complete");
return null;
}
}
;
doAnswer(blockingSync).when(editLog).logSync();
doAnEditThread.start();
LOG.info("Main thread: waiting to just before logSync...");
waitToEnterSync.await();
assertNull(deferredException.get());
LOG.info("Main thread: detected that logSync about to be called.");
LOG.info("Trying to enter safe mode.");
LOG.info("This should block for " + BLOCK_TIME + "sec, since we have pending edits");
long st=Time.now();
namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
long et=Time.now();
LOG.info("Entered safe mode");
// Safe-mode entry must have waited for the pending sync to complete.
assertTrue(et - st > (BLOCK_TIME - 1) * 1000);
namesystem.saveNamespace();
LOG.info("Joining on edit thread...");
doAnEditThread.join();
assertNull(deferredException.get());
// Verify the finalized segment [1,3] and the new in-progress segment at 4
// both replay cleanly — i.e. no edits were lost or corrupted by the save.
assertEquals(3,verifyEditLogs(namesystem,fsimage,NNStorage.getFinalizedEditsFileName(1,3),1));
assertEquals(1,verifyEditLogs(namesystem,fsimage,NNStorage.getInProgressEditsFileName(4),4));
}
finally {
LOG.info("Closing namesystem");
if (namesystem != null) namesystem.close();
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier
/**
 * Dump the INode tree rooted at "/" and sanity-check every non-empty,
 * non-snapshot line of the dump.
 */
@Test public void testDumpTree() throws Exception {
  final INode root = fsdir.getINode("/");
  LOG.info("Original tree");
  final StringBuffer b1 = root.dumpTreeRecursively();
  System.out.println("b1=" + b1);
  final BufferedReader in = new BufferedReader(new StringReader(b1.toString()));
  // The first line (the root entry) carries no tree-drawing prefix.
  String line = in.readLine();
  checkClassName(line);
  while ((line = in.readLine()) != null) {
    line = line.trim();
    if (line.isEmpty() || line.contains("snapshot")) {
      continue;
    }
    // Every subsequent entry must start with one of the two tree connectors.
    assertTrue("line=" + line,
        line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM)
            || line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM));
    checkClassName(line);
  }
}
APIUtilityVerifier IterativeVerifier EqualityVerifier
/**
 * Test setting and removing multiple xattrs via single operations.
 */
@Test(timeout=300000) public void testXAttrMultiSetRemove() throws Exception {
List existingXAttrs=Lists.newArrayListWithCapacity(0);
// Fixed seed so the random batch sizes are reproducible across runs.
final Random rand=new Random(0xFEEDA);
int numExpectedXAttrs=0;
// Phase 1: add all generated xattrs in random batches of 1-5 per setINodeXAttrs call.
while (numExpectedXAttrs < numGeneratedXAttrs) {
LOG.info("Currently have " + numExpectedXAttrs + " xattrs");
final int numToAdd=rand.nextInt(5) + 1;
List toAdd=Lists.newArrayListWithCapacity(numToAdd);
for (int i=0; i < numToAdd; i++) {
// Cap the batch so we never add beyond the generated set.
if (numExpectedXAttrs >= numGeneratedXAttrs) {
break;
}
toAdd.add(generatedXAttrs.get(numExpectedXAttrs));
numExpectedXAttrs++;
}
LOG.info("Attempting to add " + toAdd.size() + " XAttrs");
for (int i=0; i < toAdd.size(); i++) {
LOG.info("Will add XAttr " + toAdd.get(i));
}
List newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE));
// Check the batch landed and the expected total is present.
verifyXAttrsPresent(newXAttrs,numExpectedXAttrs);
existingXAttrs=newXAttrs;
}
// Phase 2: remove them again in random batches of 1-5, newest first.
while (numExpectedXAttrs > 0) {
LOG.info("Currently have " + numExpectedXAttrs + " xattrs");
final int numToRemove=rand.nextInt(5) + 1;
List toRemove=Lists.newArrayListWithCapacity(numToRemove);
for (int i=0; i < numToRemove; i++) {
// Cap the batch so we never remove more than exist.
if (numExpectedXAttrs == 0) {
break;
}
toRemove.add(generatedXAttrs.get(numExpectedXAttrs - 1));
numExpectedXAttrs--;
}
final int expectedNumToRemove=toRemove.size();
LOG.info("Attempting to remove " + expectedNumToRemove + " XAttrs");
List removedXAttrs=Lists.newArrayList();
List newXAttrs=fsdir.filterINodeXAttrs(existingXAttrs,toRemove,removedXAttrs);
// filterINodeXAttrs must report exactly the batch we asked to remove.
assertEquals("Unexpected number of removed XAttrs",expectedNumToRemove,removedXAttrs.size());
verifyXAttrsPresent(newXAttrs,numExpectedXAttrs);
existingXAttrs=newXAttrs;
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Corrupts the tail of an edits file with bogus OP_DELETE opcodes and checks
 * that the resulting replay failure message reports recent opcode offsets.
 */
@Test public void testDisplayRecentEditLogOpCodes() throws IOException {
  // Start a cluster and generate some edit-log transactions (20 mkdirs).
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).enableManagedDfsDirsRedundancy(false).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();
  FSImage fsimage = namesystem.getFSImage();
  for (int i = 0; i < 20; i++) {
    fileSys.mkdirs(new Path("/tmp/tmp" + i));
  }
  StorageDirectory sd = fsimage.getStorage().dirIterator(NameNodeDirType.EDITS).next();
  cluster.shutdown();
  // Overwrite the last 40 bytes of the latest edits file with 20 OP_DELETE
  // opcode bytes so replay fails partway through.
  File editFile = FSImageTestUtil.findLatestEditsLog(sd).getFile();
  assertTrue("Should exist: " + editFile, editFile.exists());
  long fileLen = editFile.length();
  RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
  try {
    rwf.seek(fileLen - 40);
    for (int i = 0; i < 20; i++) {
      rwf.write(FSEditLogOpCodes.OP_DELETE.getOpCode());
    }
  } finally {
    // BUGFIX: close the file even if a seek/write throws, so the handle is
    // not leaked on an I/O failure.
    rwf.close();
  }
  // The startup failure message must match this shape, including the
  // "Recent opcode offsets" diagnostic with four offsets.
  StringBuilder bld = new StringBuilder();
  bld.append("^Error replaying edit log at offset \\d+. ");
  bld.append("Expected transaction ID was \\d+\n");
  bld.append("Recent opcode offsets: (\\d+\\s*){4}$");
  try {
    // Restarting without re-formatting must fail on the corrupted log.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).enableManagedDfsDirsRedundancy(false).format(false).build();
    fail("should not be able to start");
  } catch (IOException e) {
    assertTrue("error message contains opcodes message", e.getMessage().matches(bld.toString()));
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Validating an unfinalized, zero-transaction edit log truncated to 8 bytes
 * should report a clean header and no end transaction id.
 */
@Test public void testValidateEmptyEditLog() throws IOException {
  File testDir = new File(TEST_DIR, "testValidateEmptyEditLog");
  SortedMap offsetToTxId = Maps.newTreeMap();
  // Build an in-progress log with zero transactions, then cut it to 8 bytes.
  File logFile = prepareUnfinalizedTestEditLog(testDir, 0, offsetToTxId);
  truncateFile(logFile, 8);
  EditLogValidation validation = EditLogFileInputStream.validateEditLog(logFile);
  // A short-but-clean file is not a corrupt header and contains no txns.
  assertFalse(validation.hasCorruptHeader());
  assertEquals(HdfsConstants.INVALID_TXID, validation.getEndTxId());
}
APIUtilityVerifier BooleanVerifier
/**
 * Validating an edit log whose leading 8 bytes were overwritten should be
 * reported as having a corrupt header.
 */
@Test public void testValidateEditLogWithCorruptHeader() throws IOException {
  File testDir = new File(TEST_DIR, "testValidateEditLogWithCorruptHeader");
  SortedMap offsetToTxId = Maps.newTreeMap();
  File logFile = prepareUnfinalizedTestEditLog(testDir, 2, offsetToTxId);
  // Clobber the first 8 bytes of the file so the header is unreadable.
  RandomAccessFile rwf = new RandomAccessFile(logFile, "rw");
  try {
    rwf.seek(0);
    rwf.writeLong(42);
  } finally {
    rwf.close();
  }
  EditLogValidation validation = EditLogFileInputStream.validateEditLog(logFile);
  assertTrue(validation.hasCorruptHeader());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Validates an edit log after corrupting or truncating it at every recorded
 * transaction offset, checking the reported end txid each time.
 */
@Test public void testValidateEditLogWithCorruptBody() throws IOException {
File testDir=new File(TEST_DIR,"testValidateEditLogWithCorruptBody");
SortedMap offsetToTxId=Maps.newTreeMap();
final int NUM_TXNS=20;
// Build an unfinalized log (offsetToTxId maps each txn's byte offset to its
// txid) and keep a pristine backup to restore between corruption rounds.
File logFile=prepareUnfinalizedTestEditLog(testDir,NUM_TXNS,offsetToTxId);
File logFileBak=new File(testDir,logFile.getName() + ".bak");
Files.copy(logFile,logFileBak);
// Baseline: the uncorrupted log validates cleanly and ends at NUM_TXNS+1.
EditLogValidation validation=EditLogFileInputStream.validateEditLog(logFile);
assertTrue(!validation.hasCorruptHeader());
assertEquals(NUM_TXNS + 1,validation.getEndTxId());
// Round 1: flip one byte at each txn's opcode offset. Validation should
// still reach NUM_TXNS+1 unless the corrupted txn is the final one.
for ( Map.Entry entry : offsetToTxId.entrySet()) {
long txOffset=entry.getKey();
long txId=entry.getValue();
Files.copy(logFileBak,logFile);
corruptByteInFile(logFile,txOffset);
validation=EditLogFileInputStream.validateEditLog(logFile);
long expectedEndTxId=(txId == (NUM_TXNS + 1)) ? NUM_TXNS : (NUM_TXNS + 1);
assertEquals("Failed when corrupting txn opcode at " + txOffset,expectedEndTxId,validation.getEndTxId());
assertTrue(!validation.hasCorruptHeader());
}
// Round 2: truncate the file at each txn's offset. Everything before the
// truncation point should still validate, ending at txId-1 (or INVALID_TXID
// when the whole body is gone).
for ( Map.Entry entry : offsetToTxId.entrySet()) {
long txOffset=entry.getKey();
long txId=entry.getValue();
Files.copy(logFileBak,logFile);
truncateFile(logFile,txOffset);
validation=EditLogFileInputStream.validateEditLog(logFile);
long expectedEndTxId=(txId == 0) ? HdfsConstants.INVALID_TXID : (txId - 1);
assertEquals("Failed when corrupting txid " + txId + " txn opcode "+ "at "+ txOffset,expectedEndTxId,validation.getEndTxId());
assertTrue(!validation.hasCorruptHeader());
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Ensure that the digest written by the image saver equals the digest of the
 * saved file itself.
 */
@Test public void testDigest() throws IOException {
Configuration conf=new Configuration();
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
DistributedFileSystem fs=cluster.getFileSystem();
// Enter safe mode, save the namespace (writes a new fsimage), then leave.
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
fs.saveNamespace();
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
// Locate the newest image file and compare the MD5 the saver stored
// alongside it with an MD5 recomputed directly from the file contents.
File currentDir=FSImageTestUtil.getNameNodeCurrentDirs(cluster,0).get(0);
File fsimage=FSImageTestUtil.findNewestImageFile(currentDir.getAbsolutePath());
assertEquals(MD5FileUtils.readStoredMd5ForFile(fsimage),MD5FileUtils.computeMd5ForFile(fsimage));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test when there is snapshot taken on root
 */
@Test public void testSnapshotOnRoot() throws Exception {
final Path root=new Path("/");
hdfs.allowSnapshot(root);
hdfs.createSnapshot(root,"s1");
// Restart so the snapshot state is reloaded from the edit log.
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn=cluster.getNamesystem();
hdfs=cluster.getFileSystem();
// Save a new fsimage that contains the root snapshot ...
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
// ... and restart again so the state is now loaded from that fsimage.
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn=cluster.getNamesystem();
hdfs=cluster.getFileSystem();
// Root must have no children but carry exactly one snapshot diff ("s1").
INodeDirectory rootNode=fsn.dir.getINode4Write(root.toString()).asDirectory();
assertTrue("The children list of root should be empty",rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
// NOTE(review): raw List — diffList.get(0).getSnapshotId() relies on the
// element type declared elsewhere; confirm this compiles in the full file.
List diffList=rootNode.getDiffs().asList();
assertEquals(1,diffList.size());
Snapshot s1=rootNode.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(),diffList.get(0).getSnapshotId());
// Root should be the single registered snapshottable directory.
assertEquals(1,fsn.getSnapshotManager().getNumSnapshottableDirs());
SnapshottableDirectoryStatus[] sdirs=fsn.getSnapshotManager().getSnapshottableDirListing(null);
assertEquals(root,sdirs[0].getFullPath());
// Save and restart one more time to confirm the state round-trips again.
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn=cluster.getNamesystem();
hdfs=cluster.getFileSystem();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test fsimage loading when 1) there is an empty file loaded from fsimage,
 * and 2) there is later an append operation to be applied from edit log.
 */
@Test(timeout=60000) public void testLoadImageWithEmptyFile() throws Exception {
// Create an empty file and persist it into the fsimage.
Path emptyFile=new Path(dir,"file");
FSDataOutputStream stream=hdfs.create(emptyFile);
stream.close();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
// Append a single byte; this edit lands in the edit log only.
stream=hdfs.append(emptyFile);
stream.write(1);
stream.close();
// Restart: the empty file from the image plus the append from the edit
// log must replay to a 1-byte file.
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build();
cluster.waitActive();
hdfs=cluster.getFileSystem();
FileStatus fileStatus=hdfs.getFileStatus(emptyFile);
assertEquals(1,fileStatus.getLen());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Replication queues must stay unpopulated during the very first (startup)
 * safemode, start populating after leaving it, and remain populated when
 * safemode is entered again later.
 */
@Test public void testReplQueuesActiveAfterStartupSafemode() throws IOException, InterruptedException {
Configuration config=new Configuration();
// Mock the edit log and image so FSNamesystem can be built standalone.
FSEditLog mockEditLog=Mockito.mock(FSEditLog.class);
FSImage mockImage=Mockito.mock(FSImage.class);
Mockito.when(mockImage.getEditLog()).thenReturn(mockEditLog);
FSNamesystem namesystem=new FSNamesystem(config,mockImage);
FSNamesystem fsn=Mockito.spy(namesystem);
// HA context that always reports repl queues should be populated.
HAContext mockHaContext=Mockito.mock(HAContext.class);
HAState mockHaState=Mockito.mock(HAState.class);
Mockito.when(mockHaContext.getState()).thenReturn(mockHaState);
Mockito.when(mockHaState.shouldPopulateReplQueues()).thenReturn(true);
Whitebox.setInternalState(fsn,"haContext",mockHaContext);
NameNode.initMetrics(config,NamenodeRole.NAMENODE);
// First (startup) safemode: queues must NOT be populated.
fsn.enterSafeMode(false);
assertTrue("FSNamesystem didn't enter safemode",fsn.isInSafeMode());
assertTrue("Replication queues were being populated during very first " + "safemode",!fsn.isPopulatingReplQueues());
// Leaving safemode starts populating the queues.
fsn.leaveSafeMode();
assertTrue("FSNamesystem didn't leave safemode",!fsn.isInSafeMode());
assertTrue("Replication queues weren't being populated even after leaving " + "safemode",fsn.isPopulatingReplQueues());
// Re-entering safemode later must keep the queues populated.
fsn.enterSafeMode(false);
assertTrue("FSNamesystem didn't enter safemode",fsn.isInSafeMode());
assertTrue("Replication queues weren't being populated after entering " + "safemode 2nd time",fsn.isPopulatingReplQueues());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that FSNamesystem#clear clears all leases.
 */
@Test public void testFSNamespaceClearLeases() throws Exception {
Configuration config=new HdfsConfiguration();
File nameDirectory=new File(MiniDFSCluster.getBaseDirectory(),"name");
config.set(DFS_NAMENODE_NAME_DIR_KEY,nameDirectory.getAbsolutePath());
NameNode.initMetrics(config,NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(config);
FSNamesystem namesystem=FSNamesystem.loadFromDisk(config);
// Register a single lease, then verify clear() removes it.
LeaseManager leases=namesystem.getLeaseManager();
leases.addLease("client1","importantFile");
assertEquals(1,leases.countLease());
namesystem.clear();
leases=namesystem.getLeaseManager();
assertEquals(0,leases.countLease());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests that the namenode edits dirs are gotten with duplicates removed
 */
@Test public void testUniqueEditDirs() throws IOException {
Configuration config=new Configuration();
// Three URIs are configured but two are identical; getNamespaceEditsDirs
// must collapse the duplicate, leaving two distinct directories.
config.set(DFS_NAMENODE_EDITS_DIR_KEY,"file://edits/dir, " + "file://edits/dir1,file://edits/dir1");
// Wildcard generic replaces the raw Collection; only size() is used here.
Collection<?> editsDirs=FSNamesystem.getNamespaceEditsDirs(config);
assertEquals(2,editsDirs.size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Verify the FSNamesystemState MXBean: its snapshot statistics must agree
 * with the live namesystem and PendingDeletionBlocks must be a Long.
 */
@Test public void test() throws Exception {
Configuration conf=new Configuration();
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
FSNamesystem namesystem=cluster.getNameNode().namesystem;
// Read the FSNamesystemState MXBean through the platform MBean server.
MBeanServer beanServer=ManagementFactory.getPlatformMBeanServer();
ObjectName beanName=new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
String snapshotStats=(String)(beanServer.getAttribute(beanName,"SnapshotStats"));
@SuppressWarnings("unchecked") Map stats=(Map)JSON.parse(snapshotStats);
// JMX-reported snapshot counters must agree with the namesystem's view.
assertTrue(stats.containsKey("SnapshottableDirectories") && (Long)stats.get("SnapshottableDirectories") == namesystem.getNumSnapshottableDirs());
assertTrue(stats.containsKey("Snapshots") && (Long)stats.get("Snapshots") == namesystem.getNumSnapshots());
Object pendingDeletionBlocks=beanServer.getAttribute(beanName,"PendingDeletionBlocks");
assertNotNull(pendingDeletionBlocks);
assertTrue(pendingDeletionBlocks instanceof Long);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Write a set of files with randomly chosen favored nodes and verify every
 * block replica landed on one of the requested datanodes.
 */
@Test(timeout=180000) public void testFavoredNodesEndToEnd() throws Exception {
for (int i=0; i < NUM_FILES; i++) {
// Pick a fresh random favored-node set for every file.
Random random=new Random(System.currentTimeMillis() + i);
InetSocketAddress favoredNodes[]=getDatanodes(random);
Path path=new Path("/filename" + i);
FSDataOutputStream stream=dfs.create(path,FsPermission.getDefault(),true,4096,(short)3,4096L,null,favoredNodes);
stream.write(SOME_BYTES);
stream.close();
// Every block replica must be on one of the favored nodes.
BlockLocation[] locations=getBlockLocations(path);
for ( BlockLocation location : locations) {
String[] actualHosts=location.getNames();
String[] favoredHosts=getStringForInetSocketAddrs(favoredNodes);
assertTrue(compareNodes(actualHosts,favoredHosts));
}
}
}
APIUtilityVerifier IterativeVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Request favored nodes that include a decommissioned datanode and verify
 * the writer still reaches the requested replication using only the
 * remaining good favored nodes (never the decommissioned one).
 */
@Test(timeout=180000) public void testWhenSomeNodesAreNotGood() throws Exception {
final InetSocketAddress addrs[]=new InetSocketAddress[4];
final String[] hosts=new String[addrs.length];
for (int i=0; i < addrs.length; i++) {
addrs[i]=datanodes.get(i).getXferAddress();
hosts[i]=addrs[i].getAddress().getHostAddress() + ":" + addrs[i].getPort();
}
// Decommission the first favored node so it cannot be chosen.
DatanodeInfo d=cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanodeByXferAddr(addrs[0].getAddress().getHostAddress(),addrs[0].getPort());
d.setDecommissioned();
Path p=new Path("/filename-foo-bar-baz");
final short replication=(short)3;
FSDataOutputStream out=dfs.create(p,FsPermission.getDefault(),true,4096,replication,4096L,null,addrs);
out.write(SOME_BYTES);
out.close();
d.stopDecommission();
BlockLocation[] locations=getBlockLocations(p);
// (Removed a stray empty statement ';' that followed this assert.)
Assert.assertEquals(replication,locations[0].getNames().length);
// Each chosen replica must be one of the favored hosts, but never the
// decommissioned hosts[0] (hence j > 0).
for (int i=0; i < replication; i++) {
final String loc=locations[0].getNames()[i];
int j=0;
for (; j < hosts.length && !loc.equals(hosts[j]); j++) ;
Assert.assertTrue("j=" + j,j > 0);
Assert.assertTrue("loc=" + loc + " not in host list "+ Arrays.asList(hosts)+ ", j="+ j,j < hosts.length);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that inprogress files are handled correct. Set up a single
 * edits directory. Fail on after the last roll. Then verify that the
 * logs have the expected number of transactions.
 */
@Test public void testInprogressRecovery() throws IOException {
File editsDir=new File(TestEditLog.TEST_DIR + "/inprogressrecovery");
// Five rolls, aborting during the final segment of directory 0.
NNStorage storage=setupEdits(Collections.singletonList(editsDir.toURI()),5,new AbortSpec(5,0));
StorageDirectory storageDir=storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager journal=new FileJournalManager(conf,storageDir,storage);
// All five full rolls plus the partial (failed) segment are recoverable.
assertEquals(5 * TXNS_PER_ROLL + TXNS_PER_FAIL,getNumberOfTransactions(journal,1,true,false));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Make sure that in-progress streams aren't counted if we don't ask for
 * them.
 */
@Test public void testExcludeInProgressStreams() throws CorruptionException, IOException {
File editsDir=new File(TestEditLog.TEST_DIR + "/excludeinprogressstreams");
// Ten rolls; the final in-progress segment is left unfinalized.
NNStorage storage=setupEdits(Collections.singletonList(editsDir.toURI()),10,false);
StorageDirectory storageDir=storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager journal=new FileJournalManager(conf,storageDir,storage);
// Only finalized segments count when in-progress streams are excluded.
assertEquals(100,getNumberOfTransactions(journal,1,false,false));
EditLogInputStream stream=getJournalInputStream(journal,90,false);
try {
// Reading from txid 90 must never run past the finalized txns.
FSEditLogOp op;
while ((op=stream.readOp()) != null) {
assertTrue(op.getTransactionId() <= 100);
}
}
finally {
IOUtils.cleanup(LOG,stream);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that we receive the correct number of transactions when we count
 * the number of transactions around gaps.
 * Set up a single edits directory, with no failures. Delete the 4th logfile.
 * Test that getNumberOfTransactions returns the correct number of
 * transactions before this gap and after this gap. Also verify that if you
 * try to count on the gap that an exception is thrown.
 */
@Test public void testManyLogsWithGaps() throws IOException {
File f=new File(TestEditLog.TEST_DIR + "/manylogswithgaps");
NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10);
StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
// The 4th finalized segment covers [startGapTxId, endGapTxId]; it will be
// deleted to create the gap.
final long startGapTxId=3 * TXNS_PER_ROLL + 1;
final long endGapTxId=4 * TXNS_PER_ROLL;
File[] files=new File(f,"current").listFiles(new FilenameFilter(){
@Override public boolean accept( File dir, String name){
// Match only the segment file covering the gap range.
return name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId,endGapTxId));
}
}
);
assertEquals(1,files.length);
assertTrue(files[0].delete());
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
// Counting from txid 1 stops right before the gap ...
assertEquals(startGapTxId - 1,getNumberOfTransactions(jm,1,true,true));
// ... counting from inside the gap finds nothing ...
assertEquals(0,getNumberOfTransactions(jm,startGapTxId,true,true));
// ... and counting after the gap sees all remaining txns.
assertEquals(11 * TXNS_PER_ROLL - endGapTxId,getNumberOfTransactions(jm,endGapTxId + 1,true,true));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test the normal operation of loading transactions from
 * file journal manager. 3 edits directories are setup without any
 * failures. Test that we read in the expected number of transactions.
 */
@Test public void testNormalOperation() throws IOException {
File dir0=new File(TestEditLog.TEST_DIR + "/normtest0");
File dir1=new File(TestEditLog.TEST_DIR + "/normtest1");
File dir2=new File(TestEditLog.TEST_DIR + "/normtest2");
List editUris=ImmutableList.of(dir0.toURI(),dir1.toURI(),dir2.toURI());
NNStorage storage=setupEdits(editUris,5);
// Every edits directory must expose the same full transaction count.
long journalCount=0;
for ( StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
FileJournalManager journal=new FileJournalManager(conf,sd,storage);
assertEquals(6 * TXNS_PER_ROLL,getNumberOfTransactions(journal,1,true,false));
journalCount++;
}
assertEquals(3,journalCount);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier ExceptionVerifier HybridVerifier
/**
 * Finalizing a segment in a read-only storage directory must throw
 * IllegalStateException, and the failed directory must be recorded in
 * NNStorage's removed-storage list.
 */
@Test(expected=IllegalStateException.class) public void testFinalizeErrorReportedToNNStorage() throws IOException, InterruptedException {
File f=new File(TestEditLog.TEST_DIR + "/filejournaltestError");
// Ten rolls with an abort on the last one leaves an in-progress segment.
NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10,new AbortSpec(10,0));
StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
String sdRootPath=sd.getRoot().getAbsolutePath();
// Revoke write permission recursively so finalization fails.
FileUtil.chmod(sdRootPath,"-w",true);
try {
jm.finalizeLogSegment(0,1);
}
finally {
// Restore permissions for cleanup, then verify the failed directory was
// reported to NNStorage before the expected exception propagates.
FileUtil.chmod(sdRootPath,"+w",true);
assertTrue(storage.getRemovedStorageDirs().contains(sd));
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Make requests with starting transaction ids which don't match the beginning
 * txid of some log segments.
 * This should succeed.
 */
@Test public void testAskForTransactionsMidfile() throws IOException {
File editsDir=new File(TestEditLog.TEST_DIR + "/askfortransactionsmidfile");
NNStorage storage=setupEdits(Collections.singletonList(editsDir.toURI()),10);
StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager journal=new FileJournalManager(conf,sd,storage);
final int TOTAL_TXIDS=10 * 11;
// From any starting txid the remaining count is total - start + 1.
for (int startTxId=1; startTxId <= TOTAL_TXIDS; startTxId++) {
assertEquals((TOTAL_TXIDS - startTxId) + 1,getNumberOfTransactions(journal,startTxId,true,false));
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that we can load an edits directory with a corrupt inprogress file.
 * The corrupt inprogress file should be moved to the side.
 */
@Test public void testManyLogsWithCorruptInprogress() throws IOException {
File f=new File(TestEditLog.TEST_DIR + "/manylogswithcorruptinprogress");
NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10,new AbortSpec(10,0));
StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
File[] files=new File(f,"current").listFiles(new FilenameFilter(){
@Override public boolean accept( File dir, String name){
// Only the single in-progress segment is of interest.
return name.startsWith("edits_inprogress");
}
}
);
// JUnit convention: expected value first (the original had the arguments
// reversed, which garbles the failure message).
assertEquals(1,files.length);
corruptAfterStartSegment(files[0]);
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
// All finalized txns plus the first txn of the corrupt segment load.
assertEquals(10 * TXNS_PER_ROLL + 1,getNumberOfTransactions(jm,1,true,false));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that FileJournalManager behaves correctly despite inprogress
 * files in all its edit log directories. Set up 3 directories and fail
 * all on the last roll. Verify that the correct number of transaction
 * are then loaded.
 */
@Test public void testInprogressRecoveryAll() throws IOException {
File f1=new File(TestEditLog.TEST_DIR + "/failalltest0");
File f2=new File(TestEditLog.TEST_DIR + "/failalltest1");
File f3=new File(TestEditLog.TEST_DIR + "/failalltest2");
List editUris=ImmutableList.of(f1.toURI(),f2.toURI(),f3.toURI());
// Abort during the final roll in every one of the three directories.
NNStorage storage=setupEdits(editUris,5,new AbortSpec(5,0),new AbortSpec(5,1),new AbortSpec(5,2));
// Typed iterator replaces the raw Iterator so dirs.next() assigns to
// StorageDirectory without a cast.
Iterator<StorageDirectory> dirs=storage.dirIterator(NameNodeDirType.EDITS);
StorageDirectory sd=dirs.next();
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
assertEquals(5 * TXNS_PER_ROLL + TXNS_PER_FAIL,getNumberOfTransactions(jm,1,true,false));
sd=dirs.next();
jm=new FileJournalManager(conf,sd,storage);
assertEquals(5 * TXNS_PER_ROLL + TXNS_PER_FAIL,getNumberOfTransactions(jm,1,true,false));
sd=dirs.next();
jm=new FileJournalManager(conf,sd,storage);
assertEquals(5 * TXNS_PER_ROLL + TXNS_PER_FAIL,getNumberOfTransactions(jm,1,true,false));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that we can read from a stream created by FileJournalManager.
 * Create a single edits directory, failing it on the final roll.
 * Then try loading from the point of the 3rd roll. Verify that we read
 * the correct number of transactions from this point.
 */
@Test public void testReadFromStream() throws IOException {
File editsDir=new File(TestEditLog.TEST_DIR + "/readfromstream");
NNStorage storage=setupEdits(Collections.singletonList(editsDir.toURI()),10,new AbortSpec(10,0));
StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager journal=new FileJournalManager(conf,sd,storage);
// All ten rolls plus the partial final segment load from txid 1.
long totalTxns=TXNS_PER_ROLL * 10 + TXNS_PER_FAIL;
assertEquals(totalTxns,getNumberOfTransactions(journal,1,true,false));
// Skip the first three rolls and count what remains.
long skipped=(3 * TXNS_PER_ROLL);
long firstTxId=skipped + 1;
long loadable=getNumberOfTransactions(journal,firstTxId,true,false);
assertEquals(totalTxns - skipped,loadable);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Make sure that we starting reading the correct op when we request a stream
 * with a txid in the middle of an edit log file.
 */
@Test public void testReadFromMiddleOfEditLog() throws CorruptionException, IOException {
File f=new File(TestEditLog.TEST_DIR + "/readfrommiddleofeditlog");
NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10);
StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
// Ask for a stream positioned at txid 5, inside the first segment.
EditLogInputStream elis=getJournalInputStream(jm,5,true);
try {
FSEditLogOp op=elis.readOp();
// JUnit convention: expected value first (the original had expected and
// actual reversed, which garbles the failure message).
assertEquals("read unexpected op",5,op.getTransactionId());
}
finally {
IOUtils.cleanup(LOG,elis);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test a mixture of inprogress files and finalised. Set up 3 edits
 * directories and fail the second on the last roll. Verify that reading
 * the transactions, reads from the finalised directories.
 */
@Test public void testInprogressRecoveryMixed() throws IOException {
File f1=new File(TestEditLog.TEST_DIR + "/mixtest0");
File f2=new File(TestEditLog.TEST_DIR + "/mixtest1");
File f3=new File(TestEditLog.TEST_DIR + "/mixtest2");
List editUris=ImmutableList.of(f1.toURI(),f2.toURI(),f3.toURI());
// Abort only in the second directory (index 1) on the final roll.
NNStorage storage=setupEdits(editUris,5,new AbortSpec(5,1));
// Typed iterator replaces the raw Iterator so dirs.next() assigns to
// StorageDirectory without a cast.
Iterator<StorageDirectory> dirs=storage.dirIterator(NameNodeDirType.EDITS);
StorageDirectory sd=dirs.next();
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
// Directory 0 is fully finalized.
assertEquals(6 * TXNS_PER_ROLL,getNumberOfTransactions(jm,1,true,false));
sd=dirs.next();
jm=new FileJournalManager(conf,sd,storage);
// Directory 1 aborted on the last roll: partial final segment.
assertEquals(5 * TXNS_PER_ROLL + TXNS_PER_FAIL,getNumberOfTransactions(jm,1,true,false));
sd=dirs.next();
jm=new FileJournalManager(conf,sd,storage);
// Directory 2 is fully finalized.
assertEquals(6 * TXNS_PER_ROLL,getNumberOfTransactions(jm,1,true,false));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test if fsck can return -1 in case of failure
 * @throws Exception
 */
@Test public void testFsckError() throws Exception {
MiniDFSCluster cluster=null;
try {
Configuration conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).build();
String fileName="/test.txt";
Path filePath=new Path(fileName);
FileSystem fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,filePath,1L,(short)1,1L);
DFSTestUtil.waitReplication(fs,filePath,(short)1);
INodeFile node=(INodeFile)cluster.getNamesystem().dir.getNode(fileName,true);
final BlockInfo[] blocks=node.getBlocks();
// JUnit convention: expected value first (the original had the arguments
// reversed, which garbles the failure message).
assertEquals(1,blocks.length);
// Poison the block metadata so fsck hits an internal failure.
blocks[0].setNumBytes(-1L);
// fsck must exit with -1 and report FAILURE status.
String outStr=runFsck(conf,-1,true,fileName);
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));
fs.delete(filePath,true);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test fsck with permission set on inodes
 */
@Test public void testFsckPermission() throws Exception {
final DFSTestUtil util=new DFSTestUtil.Builder().setName(getClass().getSimpleName()).setNumFiles(20).build();
final Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
final MiniDFSCluster c2=cluster;
final String dir="/dfsck";
final Path dirpath=new Path(dir);
final FileSystem fs=c2.getFileSystem();
util.createFiles(fs,dir);
util.waitReplication(fs,dir,(short)3);
// Lock the directory down to owner-only access.
fs.setPermission(dirpath,new FsPermission((short)0700));
UserGroupInformation fakeUGI=UserGroupInformation.createUserForTesting("ProbablyNotARealUserName",new String[]{"ShangriLa"});
// As a foreign user with mode 0700, fsck is expected to fail
// (exit code -1 is asserted inside runFsck).
fakeUGI.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
System.out.println(runFsck(conf,-1,true,dir));
return null;
}
}
);
// Open the directory up; the same user must now get a HEALTHY report.
fs.setPermission(dirpath,new FsPermission((short)0777));
fakeUGI.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final String outStr=runFsck(conf,0,true,dir);
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
return null;
}
}
);
util.cleanup(fs,dir);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests that the # of missing block replicas and expected replicas is correct
 * @throws IOException
 */
@Test public void testFsckMissingReplicas() throws IOException {
// Desired repl is 2 but only 1 datanode exists, so every block is
// under-replicated by exactly one replica.
final short REPL_FACTOR=2;
final short NUM_REPLICAS=1;
final short NUM_BLOCKS=3;
final long blockSize=512;
Configuration conf=new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,blockSize);
MiniDFSCluster cluster=null;
DistributedFileSystem dfs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_REPLICAS).build();
assertNotNull("Failed Cluster Creation",cluster);
cluster.waitClusterUp();
dfs=cluster.getFileSystem();
assertNotNull("Failed to get FileSystem",dfs);
// String literal used directly (the original wrapped it in a redundant
// 'new String(...)').
final String pathString="/testfile";
final Path path=new Path(pathString);
long fileLen=blockSize * NUM_BLOCKS;
DFSTestUtil.createFile(dfs,path,fileLen,REPL_FACTOR,1);
NameNode namenode=cluster.getNameNode();
NetworkTopology nettop=cluster.getNamesystem().getBlockManager().getDatanodeManager().getNetworkTopology();
Map pmap=new HashMap();
Writer result=new StringWriter();
PrintWriter out=new PrintWriter(result,true);
InetAddress remoteAddress=InetAddress.getLocalHost();
NamenodeFsck fsck=new NamenodeFsck(conf,namenode,nettop,pmap,out,NUM_REPLICAS,(short)1,remoteAddress);
final HdfsFileStatus file=namenode.getRpcServer().getFileInfo(pathString);
assertNotNull(file);
Result res=new Result(conf);
fsck.check(pathString,file,res);
System.out.println(result.toString());
// JUnit convention: expected value first (the original had the arguments
// reversed, which garbles the failure message).
assertEquals((NUM_BLOCKS * REPL_FACTOR) - (NUM_BLOCKS * NUM_REPLICAS),res.missingReplicas);
assertEquals(NUM_BLOCKS * REPL_FACTOR,res.numExpectedReplicas);
}
finally {
if (dfs != null) {
dfs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * do fsck
 */
@Test public void testFsck() throws Exception {
DFSTestUtil util=new DFSTestUtil.Builder().setName("TestFsck").setNumFiles(20).build();
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
Configuration conf=new HdfsConfiguration();
// 1ms access-time precision, so a changed atime would be observable.
final long precision=1L;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,precision);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
fs=cluster.getFileSystem();
final String fileName="/srcdat";
util.createFiles(fs,fileName);
util.waitReplication(fs,fileName,(short)3);
final Path file=new Path(fileName);
long aTime=fs.getFileStatus(file).getAccessTime();
Thread.sleep(precision);
setupAuditLogs();
// Healthy cluster: fsck must succeed, be audit-logged, and must not
// change the file's access time.
String outStr=runFsck(conf,0,true,"/");
verifyAuditLogs();
assertEquals(aTime,fs.getFileStatus(file).getAccessTime());
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
// Close the client before shutting the cluster down for restart.
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
}
}
cluster.shutdown();
// Restart with zero datanodes: all blocks are unreachable, so fsck must
// report CORRUPT and exit with status 1.
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
outStr=runFsck(conf,1,true,"/");
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
System.out.println(outStr);
// Bring the datanodes back so cleanup can delete the test files.
cluster.startDataNodes(conf,4,true,null,null);
cluster.waitActive();
cluster.waitClusterUp();
fs=cluster.getFileSystem();
util.cleanup(fs,"/srcdat");
}
finally {
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
}
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Corrupt the on-disk replica of a single-replica file, force the client to
 * read it (so the corruption is detected and reported), then verify fsck
 * reports CORRUPT and names the file.
 */
@Test public void testCorruptBlock() throws Exception {
Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000);
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10);
FileSystem fs=null;
DFSClient dfsClient=null;
LocatedBlocks blocks=null;
int replicaCount=0;
Random random=new Random();
String outStr=null;
// Single replica so that corrupting one copy corrupts the whole block.
short factor=1;
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs=cluster.getFileSystem();
Path file1=new Path("/testCorruptBlock");
DFSTestUtil.createFile(fs,file1,1024,factor,0);
DFSTestUtil.waitReplication(fs,file1,factor);
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,file1);
// Before corruption, fsck reports healthy.
outStr=runFsck(conf,0,true,"/");
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
// Overwrite bytes somewhere in the first half of the on-disk block file.
File blockFile=MiniDFSCluster.getBlockFile(0,block);
if (blockFile != null && blockFile.exists()) {
RandomAccessFile raFile=new RandomAccessFile(blockFile,"rw");
FileChannel channel=raFile.getChannel();
String badString="BADBAD";
int rand=random.nextInt((int)channel.size() / 2);
raFile.seek(rand);
raFile.write(badString.getBytes());
raFile.close();
}
// Read the file so the client detects the checksum error and reports the
// corrupt replica to the namenode; the IOException is expected.
try {
IOUtils.copyBytes(fs.open(file1),new IOUtils.NullOutputStream(),conf,true);
}
catch ( IOException ie) {
}
dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf);
blocks=dfsClient.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
replicaCount=blocks.get(0).getLocations().length;
// Poll until the namenode reflects the expected replica count.
while (replicaCount != factor) {
try {
Thread.sleep(100);
}
catch ( InterruptedException ignore) {
}
blocks=dfsClient.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
replicaCount=blocks.get(0).getLocations().length;
}
assertTrue(blocks.get(0).isCorrupt());
// fsck must now report CORRUPT and include the file's name.
outStr=runFsck(conf,1,true,"/");
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
assertTrue(outStr.contains("testCorruptBlock"));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests that the # of misreplaced replicas is correct
 * @throws IOException
 */
@Test public void testFsckMisPlacedReplicas() throws IOException {
final short REPL_FACTOR=2;
short NUM_DN=2;
final short NUM_BLOCKS=3;
final long blockSize=512;
// Both real datanodes sit on the same rack, so after a second rack is
// registered every block violates the rack-placement policy.
String[] racks={"/rack1","/rack1"};
String[] hosts={"host1","host2"};
Configuration conf=new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,blockSize);
MiniDFSCluster cluster=null;
DistributedFileSystem dfs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts).racks(racks).build();
assertNotNull("Failed Cluster Creation",cluster);
cluster.waitClusterUp();
dfs=cluster.getFileSystem();
assertNotNull("Failed to get FileSystem",dfs);
// String literal used directly (the original wrapped it in a redundant
// 'new String(...)').
final String pathString="/testfile";
final Path path=new Path(pathString);
long fileLen=blockSize * NUM_BLOCKS;
DFSTestUtil.createFile(dfs,path,fileLen,REPL_FACTOR,1);
NameNode namenode=cluster.getNameNode();
NetworkTopology nettop=cluster.getNamesystem().getBlockManager().getDatanodeManager().getNetworkTopology();
// Add a phantom node on a second rack so multi-rack placement is expected.
nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2","/host3"));
NUM_DN++;
Map pmap=new HashMap();
Writer result=new StringWriter();
PrintWriter out=new PrintWriter(result,true);
InetAddress remoteAddress=InetAddress.getLocalHost();
NamenodeFsck fsck=new NamenodeFsck(conf,namenode,nettop,pmap,out,NUM_DN,REPL_FACTOR,remoteAddress);
final HdfsFileStatus file=namenode.getRpcServer().getFileInfo(pathString);
assertNotNull(file);
Result res=new Result(conf);
fsck.check(pathString,file,res);
// JUnit convention: expected value first (the original had the arguments
// reversed, which garbles the failure message).
assertEquals(NUM_BLOCKS,res.numMisReplicatedBlocks);
}
finally {
if (dfs != null) {
dfs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier BooleanVerifier
/**
 * Test for including the snapshot files in fsck report
 */
@Test public void testFsckForSnapshotFiles() throws Exception {
final Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
// An empty cluster with -includeSnapshots must still report healthy.
String fsckOut=runFsck(conf,0,true,"/","-includeSnapshots","-files");
assertTrue(fsckOut.contains("HEALTHY"));
// Create a file, then snapshot the root directory.
final String fileName="/srcdat";
DistributedFileSystem hdfs=cluster.getFileSystem();
Path srcFile=new Path(fileName);
DFSTestUtil.createFile(hdfs,srcFile,1024,(short)1,1000L);
hdfs.allowSnapshot(new Path("/"));
hdfs.createSnapshot(new Path("/"),"mySnapShot");
// With -includeSnapshots the snapshot copy shows up in the listing ...
fsckOut=runFsck(conf,0,true,"/","-includeSnapshots","-files");
assertTrue(fsckOut.contains("/.snapshot/mySnapShot/srcdat"));
// ... and without the flag, snapshot paths are omitted.
fsckOut=runFsck(conf,0,true,"/","-files");
assertFalse(fsckOut.contains("mySnapShot"));
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier
/**
 * Delete all replicas of one file's first block, then verify that
 * "fsck -move" repeatedly leaves the (unmovable) corrupt file in place and
 * that "fsck -move -delete" finally removes it, restoring a healthy report.
 */
@Test public void testFsckMoveAndDelete() throws Exception {
final int MAX_MOVE_TRIES=5;
DFSTestUtil util=new DFSTestUtil.Builder().setName("TestFsckMoveAndDelete").setNumFiles(5).build();
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L);
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,1);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
String topDir="/srcdat";
fs=cluster.getFileSystem();
cluster.waitActive();
util.createFiles(fs,topDir);
util.waitReplication(fs,topDir,(short)3);
String outStr=runFsck(conf,0,true,"/");
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
String[] fileNames=util.getFileNames(topDir);
DFSClient dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf);
// Delete every on-disk replica of the first file's first block.
String corruptFileName=fileNames[0];
ExtendedBlock block=dfsClient.getNamenode().getBlockLocations(corruptFileName,0,Long.MAX_VALUE).get(0).getBlock();
for (int i=0; i < 4; i++) {
File blockFile=MiniDFSCluster.getBlockFile(i,block);
if (blockFile != null && blockFile.exists()) {
assertTrue(blockFile.delete());
}
}
// Poll until the namenode notices the corruption.
outStr=runFsck(conf,1,false,"/");
while (!outStr.contains(NamenodeFsck.CORRUPT_STATUS)) {
try {
Thread.sleep(100);
}
catch ( InterruptedException ignore) {
}
outStr=runFsck(conf,1,false,"/");
}
// "-move" cannot fix a block with no replicas: the corrupt file must
// still be present after every attempt.
for (int i=0; i < MAX_MOVE_TRIES; i++) {
outStr=runFsck(conf,1,true,"/","-move");
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
String[] newFileNames=util.getFileNames(topDir);
boolean found=false;
for ( String f : newFileNames) {
if (f.equals(corruptFileName)) {
found=true;
break;
}
}
assertTrue(found);
}
// "-move -delete" removes the corrupt file, so fsck reports healthy.
outStr=runFsck(conf,1,true,"/","-move","-delete");
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
outStr=runFsck(conf,0,true,"/");
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
util.cleanup(fs,topDir);
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
}
}
cluster.shutdown();
}
finally {
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
}
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier
/**
 * End-to-end test of fsck "-move": corrupts five files by deleting chosen
 * block replicas, polls fsck until it reports exactly the expected number of
 * corrupt blocks, salvages readable data with "-move", then clears the damage
 * with "-delete" and verifies the filesystem returns to HEALTHY.
 */
@Test public void testFsckMove() throws Exception {
Configuration conf=new HdfsConfiguration();
final int DFS_BLOCK_SIZE=1024;
final int NUM_DATANODES=4;
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DFS_BLOCK_SIZE);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L);
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,1);
// File sizes are expressed relative to the 1 KB block size — presumably
// max/min file lengths; confirm against the DFSTestUtil constructor.
DFSTestUtil util=new DFSTestUtil("TestFsck",5,3,(5 * DFS_BLOCK_SIZE) + (DFS_BLOCK_SIZE - 1),5 * DFS_BLOCK_SIZE);
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
String topDir="/srcdat";
fs=cluster.getFileSystem();
cluster.waitActive();
util.createFiles(fs,topDir);
util.waitReplication(fs,topDir,(short)3);
// Baseline: freshly written data must be reported healthy.
String outStr=runFsck(conf,0,true,"/");
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
DFSClient dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf);
String fileNames[]=util.getFileNames(topDir);
// Corrupt each of the five test files by removing a different subset of
// its blocks (the integer sets are block indices to delete).
CorruptedTestFile ctFiles[]=new CorruptedTestFile[]{new CorruptedTestFile(fileNames[0],Sets.newHashSet(0),dfsClient,NUM_DATANODES,DFS_BLOCK_SIZE),new CorruptedTestFile(fileNames[1],Sets.newHashSet(2,3),dfsClient,NUM_DATANODES,DFS_BLOCK_SIZE),new CorruptedTestFile(fileNames[2],Sets.newHashSet(4),dfsClient,NUM_DATANODES,DFS_BLOCK_SIZE),new CorruptedTestFile(fileNames[3],Sets.newHashSet(0,1,2,3),dfsClient,NUM_DATANODES,DFS_BLOCK_SIZE),new CorruptedTestFile(fileNames[4],Sets.newHashSet(1,2,3,4),dfsClient,NUM_DATANODES,DFS_BLOCK_SIZE)};
int totalMissingBlocks=0;
for ( CorruptedTestFile ctFile : ctFiles) {
totalMissingBlocks+=ctFile.getTotalMissingBlocks();
}
for ( CorruptedTestFile ctFile : ctFiles) {
ctFile.removeBlocks();
}
// Poll fsck until the namenode has noticed every missing block: the
// corrupt-block count parsed from the output must reach
// totalMissingBlocks exactly before we proceed.
while (true) {
outStr=runFsck(conf,1,false,"/");
String numCorrupt=null;
for ( String line : outStr.split(LINE_SEPARATOR)) {
Matcher m=numCorruptBlocksPattern.matcher(line);
if (m.matches()) {
numCorrupt=m.group(1);
break;
}
}
if (numCorrupt == null) {
throw new IOException("failed to find number of corrupt " + "blocks in fsck output.");
}
if (numCorrupt.equals(Integer.toString(totalMissingBlocks))) {
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
break;
}
try {
Thread.sleep(100);
}
catch ( InterruptedException ignore) {
// keep polling; the loop re-runs fsck
}
}
// Salvage readable data with -move; checkSalvagedRemains() verifies the
// surviving byte ranges of each corrupted file.
outStr=runFsck(conf,1,false,"/","-move");
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
for ( CorruptedTestFile ctFile : ctFiles) {
ctFile.checkSalvagedRemains();
}
// Deleting the corrupt files must bring the filesystem back to HEALTHY.
outStr=runFsck(conf,1,true,"/","-delete");
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
outStr=runFsck(conf,0,true,"/");
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
util.cleanup(fs,topDir);
}
finally {
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
// best-effort close during teardown
}
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier BooleanVerifier
/**
 * Test for checking fsck command on illegal arguments should print the proper
 * usage.
 */
@Test public void testToCheckTheFsckCommandOnIllegalArguments() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).build();
    String fileName = "/test.txt";
    Path filePath = new Path(fileName);
    FileSystem fs = cluster.getFileSystem();
    // Create a small, fully replicated file so fsck has something to inspect.
    DFSTestUtil.createFile(fs, filePath, 1L, (short) 1, 1L);
    DFSTestUtil.waitReplication(fs, filePath, (short) 1);
    // An unknown flag must make fsck fail (expected exit code -1) and must
    // never report a healthy filesystem. assertFalse(...) replaces the
    // assertTrue(!...) form for clearer intent and failure reporting.
    String outStr = runFsck(conf, -1, true, fileName, "-thisIsNotAValidFlag");
    System.out.println(outStr);
    assertFalse(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    // Passing two path arguments is likewise illegal.
    outStr = runFsck(conf, -1, true, "/", fileName);
    System.out.println(outStr);
    assertFalse(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    fs.delete(filePath, true);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * fsck on a path that does not exist must not report the filesystem healthy.
 */
@Test public void testFsckNonExistent() throws Exception {
  DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").setNumFiles(20).build();
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    util.createFiles(fs, "/srcdat");
    util.waitReplication(fs, "/srcdat", (short) 3);
    String outStr = runFsck(conf, 0, true, "/non-existent");
    // Consistent with the sibling fsck tests: assert on contains() rather
    // than the equivalent but less readable indexOf(...) == -1 check.
    assertFalse(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    System.out.println(outStr);
    util.cleanup(fs, "/srcdat");
  } finally {
    if (fs != null) {
      try {
        fs.close();
      } catch (Exception e) {
        // best-effort close during teardown
      }
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test fsck with symlinks in the filesystem: fsck must not disturb the
 * symlink's access time and must count the symlink in its summary output.
 */
@Test public void testFsckSymlink() throws Exception {
  final DFSTestUtil util = new DFSTestUtil.Builder().setName(getClass().getSimpleName()).setNumFiles(1).build();
  final Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    // 1 ms access-time precision, so any accidental access-time update by
    // fsck would be observable by the assertion below.
    final long precision = 1L;
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
    // (The block-report interval is already set above; the previous
    // duplicate setLong of the same key/value was removed.)
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    final String fileName = "/srcdat";
    util.createFiles(fs, fileName);
    final FileContext fc = FileContext.getFileContext(cluster.getConfiguration(0));
    final Path file = new Path(fileName);
    final Path symlink = new Path("/srcdat-symlink");
    fc.createSymlink(file, symlink, false);
    util.waitReplication(fs, fileName, (short) 3);
    long aTime = fc.getFileStatus(symlink).getAccessTime();
    Thread.sleep(precision);
    setupAuditLogs();
    String outStr = runFsck(conf, 0, true, "/");
    verifyAuditLogs();
    // fsck must not have bumped the symlink's access time.
    assertEquals(aTime, fc.getFileStatus(symlink).getAccessTime());
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    assertTrue(outStr.contains("Total symlinks:\t\t1"));
    util.cleanup(fs, fileName);
  } finally {
    if (fs != null) {
      try {
        fs.close();
      } catch (Exception e) {
        // best-effort close during teardown
      }
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * check if option -list-corruptfiles of fsck command works properly
 */
@Test public void testFsckListCorruptFilesBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData", (short) 1);
    util.waitReplication(fs, "/corruptData", (short) 1);
    // Before any corruption the report must show zero corrupt files.
    String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
    System.out.println("1. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    // Delete every finalized block file (and its metadata file) on disk,
    // corrupting all three test files.
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i = 0; i < 4; i++) {
      for (int j = 0; j <= 1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        // Typed list (was a raw List) so the enhanced-for below is
        // type-correct without an implicit unchecked conversion.
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
        if (metadataFiles == null) continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          assertTrue("Cannot remove file.", blockFile.delete());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }
    // Wait until the namenode notices at least one corrupt file.
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    CorruptFileBlocks corruptFileBlocks = namenode.listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.getFiles().length;
    while (numCorrupt == 0) {
      Thread.sleep(1000);
      corruptFileBlocks = namenode.listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.getFiles().length;
    }
    outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
    System.out.println("2. bad fsck out: " + outStr);
    assertTrue(outStr.contains("has 3 CORRUPT files"));
    // A fresh, healthy directory must still report zero corrupt files.
    util.createFiles(fs, "/goodData");
    outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
    System.out.println("3. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies fsck's handling of files open for write: not flagged by default,
 * reported by name with "-openforwrite", and no longer flagged once closed.
 */
@Test public void testFsckOpenFiles() throws Exception {
  DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").setNumFiles(4).build();
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    String topDir = "/srcdat";
    String randomString = "HADOOP ";
    fs = cluster.getFileSystem();
    cluster.waitActive();
    util.createFiles(fs, topDir);
    util.waitReplication(fs, topDir, (short) 3);
    String outStr = runFsck(conf, 0, true, "/");
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    // Keep a file open for write with some data written but not closed.
    Path openFile = new Path(topDir + "/openFile");
    FSDataOutputStream out = fs.create(openFile);
    int writeCount = 0;
    while (writeCount != 100) {
      out.write(randomString.getBytes());
      writeCount++;
    }
    // Default fsck: HEALTHY and no OPENFORWRITE marker.
    outStr = runFsck(conf, 0, true, topDir);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    assertFalse(outStr.contains("OPENFORWRITE"));
    // With -openforwrite the open file must be reported by name.
    outStr = runFsck(conf, 0, true, topDir, "-openforwrite");
    System.out.println(outStr);
    assertTrue(outStr.contains("OPENFORWRITE"));
    assertTrue(outStr.contains("openFile"));
    // After closing, the OPENFORWRITE marker must disappear again.
    out.close();
    outStr = runFsck(conf, 0, true, topDir);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    assertFalse(outStr.contains("OPENFORWRITE"));
    util.cleanup(fs, topDir);
    // Resource cleanup happens exactly once, in the finally block below;
    // the duplicated fs.close()/cluster.shutdown() that used to sit here
    // was redundant.
  } finally {
    if (fs != null) {
      try {
        fs.close();
      } catch (Exception e) {
        // best-effort close during teardown
      }
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies that after a datanode is excluded and decommissioned, the
 * NameNodeInfo "LiveNodes" JMX attribute (which backs the web UI) reports it
 * as "Decommissioned".
 */
@Test public void testHostsExcludeInUI() throws Exception {
Configuration conf=getConf();
short REPLICATION_FACTOR=2;
final Path filePath=new Path("/testFile");
// Host include/exclude files live on the local filesystem under the
// build directory.
FileSystem localFileSys=FileSystem.getLocal(conf);
Path workingDir=localFileSys.getWorkingDirectory();
Path dir=new Path(workingDir,"build/test/data/temp/decommission");
Path excludeFile=new Path(dir,"exclude");
Path includeFile=new Path(dir,"include");
assertTrue(localFileSys.mkdirs(dir));
DFSTestUtil.writeFile(localFileSys,excludeFile,"");
DFSTestUtil.writeFile(localFileSys,includeFile,"");
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE,excludeFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS,includeFile.toUri().getPath());
String racks[]={"/rack1","/rack1","/rack2","/rack2"};
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns=cluster.getNameNode().getNamesystem();
try {
final FileSystem fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,filePath,1L,REPLICATION_FACTOR,1L);
ExtendedBlock b=DFSTestUtil.getFirstBlock(fs,filePath);
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
// Pick the first datanode that holds the block and exclude it.
BlockLocation locs[]=fs.getFileBlockLocations(fs.getFileStatus(filePath),0,Long.MAX_VALUE);
String name=locs[0].getNames()[0];
String names=name + "\n" + "localhost:42\n";
LOG.info("adding '" + names + "' to exclude file "+ excludeFile.toUri().getPath());
// NOTE(review): only 'name' is written to the exclude file, although the
// log message above mentions 'names' (which also lists localhost:42) —
// confirm whether the extra entry was intended.
DFSTestUtil.writeFile(localFileSys,excludeFile,name);
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
DFSTestUtil.waitForDecommission(fs,name);
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
// The LiveNodes JMX attribute must flag the node as decommissioned.
MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
String nodes=(String)mbs.getAttribute(mxbeanName,"LiveNodes");
assertTrue("Live nodes should contain the decommissioned node",nodes.contains("Decommissioned"));
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Dead-node accounting: hosts listed in the include file that never register
 * as datanodes must be counted as dead, both by the namesystem and through
 * the FSNamesystemState JMX bean.
 */
@Test public void testHostsIncludeForDeadCount() throws Exception {
  Configuration conf = getConf();
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  // Two hosts in the include file, but the cluster starts with zero
  // datanodes, so both must be reported dead.
  StringBuilder includeHosts = new StringBuilder();
  includeHosts.append("localhost:52").append("\n").append("127.0.0.1:7777").append("\n");
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    // assertEquals gives a diagnosable failure message, unlike the old
    // assertTrue(x == n) form.
    assertEquals(2, ns.getNumDeadDataNodes());
    assertEquals(0, ns.getNumLiveDataNodes());
    // The same counts must be visible over JMX. (An unused local that
    // duplicated the first getAttribute call was removed.)
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
    assertEquals(2, ((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes")).intValue());
    assertEquals(0, ((Integer) mbs.getAttribute(mxbeanName, "NumLiveDataNodes")).intValue());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Exercises getListing() with various startAfter tokens, including
 * /.reserved/.inodes paths, and verifies a deleted startAfter inode path
 * raises DirectoryListingStartAfterNotFoundException.
 */
@Test public void testFilesInGetListingOps() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    hdfs.mkdirs(new Path("/tmp"));
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);
    // Listing from the beginning returns all three files.
    DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp", HdfsFileStatus.EMPTY_NAME, false);
    assertEquals(3, dl.getPartialListing().length);
    // Starting after "f2" returns only the entries that sort after it.
    // (The redundant new String("f2") allocation was removed.)
    String f2 = "f2";
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false);
    assertEquals(1, dl.getPartialListing().length);
    // The same startAfter semantics hold for an inode-id based path.
    INode f2INode = fsdir.getINode("/tmp/f2");
    String f2InodePath = "/.reserved/.inodes/" + f2INode.getId();
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(), false);
    assertEquals(1, dl.getPartialListing().length);
    // A startAfter token referencing a deleted inode must fail explicitly.
    hdfs.delete(new Path("/tmp/f2"), false);
    try {
      dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(), false);
      fail("Didn't get exception for the deleted startAfter token.");
    } catch (IOException e) {
      assertTrue(e instanceof DirectoryListingStartAfterNotFoundException);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * Test for the PreferredBlockSize upper bound. Sets the maximum allowed value
 * and checks that it is stored and returned unchanged.
 */
@Test public void testPreferredBlockSizeUpperBound(){
  replication = 3;
  preferredBlockSize = BLKSIZE_MAXVALUE;
  INodeFile inf = createINodeFile(replication, preferredBlockSize);
  // Message fixed: the old "True has to be returned in this case" text was
  // copy-pasted from a boolean assertion and did not describe this check.
  assertEquals("preferred block size should match the value set", BLKSIZE_MAXVALUE, inf.getPreferredBlockSize());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests for addressing files using /.reserved/.inodes/ in file system
 * operations: the inode-id based path must behave identically to the regular
 * path for create, permission/owner/times, replication, xattr/ACL queries,
 * symlinks, append/lease recovery, block locations, rename, listing, and
 * delete.
 */
@Test public void testInodeIdBasedPaths() throws Exception {
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,true);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs=cluster.getFileSystem();
NamenodeProtocols nnRpc=cluster.getNameNodeRpc();
// Base directory addressed through the root inode id; the regular path
// form is kept alongside for comparison assertions below.
Path baseDir=getInodePath(INodeId.ROOT_INODE_ID,"testInodeIdBasedPaths");
Path baseDirRegPath=new Path("/testInodeIdBasedPaths");
fs.mkdirs(baseDir);
fs.exists(baseDir);
long baseDirFileId=nnRpc.getFileInfo(baseDir.toString()).getFileId();
Path testFileInodePath=getInodePath(baseDirFileId,"test1");
Path testFileRegularPath=new Path(baseDir,"test1");
final int testFileBlockSize=1024;
// Create through the inode path, then exercise metadata operations on it.
FileSystemTestHelper.createFile(fs,testFileInodePath,1,testFileBlockSize);
assertTrue(fs.exists(testFileInodePath));
FsPermission perm=new FsPermission((short)0666);
fs.setPermission(testFileInodePath,perm);
FileStatus fileStatus=fs.getFileStatus(testFileInodePath);
assertEquals(perm,fileStatus.getPermission());
fs.setOwner(testFileInodePath,fileStatus.getOwner(),fileStatus.getGroup());
fs.setTimes(testFileInodePath,0,0);
fileStatus=fs.getFileStatus(testFileInodePath);
assertEquals(0,fileStatus.getModificationTime());
assertEquals(0,fileStatus.getAccessTime());
fs.setReplication(testFileInodePath,(short)3);
fileStatus=fs.getFileStatus(testFileInodePath);
assertEquals(3,fileStatus.getReplication());
fs.setReplication(testFileInodePath,(short)1);
assertEquals(testFileBlockSize,nnRpc.getPreferredBlockSize(testFileInodePath.toString()));
// These calls only need to succeed (not throw) on an inode path; their
// return values are not asserted.
{
fs.isFileClosed(testFileInodePath);
fs.getAclStatus(testFileInodePath);
fs.getXAttrs(testFileInodePath);
fs.listXAttrs(testFileInodePath);
fs.access(testFileInodePath,FsAction.READ_WRITE);
}
// Symlink creation with invalid and valid targets under the inode path.
String invalidTarget=new Path(baseDir,"invalidTarget").toString();
String link=new Path(baseDir,"link").toString();
testInvalidSymlinkTarget(nnRpc,invalidTarget,link);
String validTarget="/validtarget";
testValidSymlinkTarget(nnRpc,validTarget,link);
// NOTE(review): the append stream is never closed; recoverLease below
// presumably releases the lease so later operations succeed — confirm.
fs.append(testFileInodePath);
fs.recoverLease(testFileInodePath);
// Block locations must be identical whichever path form is queried.
LocatedBlocks l1=nnRpc.getBlockLocations(testFileInodePath.toString(),0,Long.MAX_VALUE);
LocatedBlocks l2=nnRpc.getBlockLocations(testFileRegularPath.toString(),0,Long.MAX_VALUE);
checkEquals(l1,l2);
// Rename round-trips (plain and OVERWRITE) must leave the file status
// unchanged.
Path renameDst=getInodePath(baseDirFileId,"test2");
fileStatus=fs.getFileStatus(testFileInodePath);
fs.rename(testFileInodePath,renameDst);
fs.rename(renameDst,testFileInodePath);
assertEquals(fileStatus,fs.getFileStatus(testFileInodePath));
fs.rename(testFileInodePath,renameDst,Rename.OVERWRITE);
fs.rename(renameDst,testFileInodePath,Rename.OVERWRITE);
assertEquals(fileStatus,fs.getFileStatus(testFileInodePath));
// Content summary and directory listing agree across path forms.
assertEquals(fs.getContentSummary(testFileRegularPath).toString(),fs.getContentSummary(testFileInodePath).toString());
checkEquals(fs.listFiles(baseDirRegPath,false),fs.listFiles(baseDir,false));
fs.delete(testFileInodePath,true);
assertFalse(fs.exists(testFileInodePath));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier EqualityVerifier
/**
 * Test for the Replication value. Sets a value and checks if it was set
 * correct.
 */
@Test public void testReplication(){
  replication = 3;
  preferredBlockSize = 128 * 1024 * 1024;
  INodeFile inf = createINodeFile(replication, preferredBlockSize);
  // Message fixed: the old "True has to be returned in this case" text was
  // copy-pasted from a boolean assertion and did not describe this check.
  assertEquals("file replication should match the value set", replication, inf.getFileReplication());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies that ".." in an /.reserved/.inodes path resolves to the parent
 * inode, and that the root's ".." resolves to the root itself.
 */
@Test public void testDotdotInodePath() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  DFSClient client = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    final Path dir = new Path("/dir");
    hdfs.mkdirs(dir);
    long dirId = fsdir.getINode(dir.toString()).getId();
    long parentId = fsdir.getINode("/").getId();
    // "/dir/.." must resolve to the root inode. assertEquals replaces
    // assertTrue(a == b) so a failure reports both ids.
    String testPath = "/.reserved/.inodes/" + dirId + "/..";
    client = new DFSClient(NameNode.getAddress(conf), conf);
    HdfsFileStatus status = client.getFileInfo(testPath);
    assertEquals(parentId, status.getFileId());
    // The root's ".." resolves to the root itself.
    testPath = "/.reserved/.inodes/" + parentId + "/..";
    status = client.getFileInfo(testPath);
    assertEquals(parentId, status.getFileId());
  } finally {
    IOUtils.cleanup(LOG, client);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Adds an XAttrFeature to an INodeFile, verifies the stored xattr is
 * returned, then removes the feature and verifies it is gone.
 */
@Test public void testXAttrFeature(){
  replication = 3;
  preferredBlockSize = 128 * 1024 * 1024;
  INodeFile inf = createINodeFile(replication, preferredBlockSize);
  // Parameterized builder (was a raw type) so build() yields
  // ImmutableList<XAttr> without an unchecked conversion.
  ImmutableList.Builder<XAttr> builder = new ImmutableList.Builder<XAttr>();
  XAttr xAttr = new XAttr.Builder().setNameSpace(XAttr.NameSpace.USER).setName("a1").setValue(new byte[]{0x31, 0x32, 0x33}).build();
  builder.add(xAttr);
  XAttrFeature f = new XAttrFeature(builder.build());
  inf.addXAttrFeature(f);
  XAttrFeature f1 = inf.getXAttrFeature();
  assertEquals(xAttr, f1.getXAttrs().get(0));
  inf.removeXAttrFeature();
  f1 = inf.getXAttrFeature();
  // Expected value comes first in JUnit; the original had the arguments
  // swapped, which garbles the failure message.
  assertEquals(null, f1);
}
APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests for {@link FSDirectory#resolvePath(String,byte[][],FSDirectory)}
 */
@Test public void testInodePath() throws IOException {
  String path = "/a/b/c";
  INode inode = createTreeOfInodes(path);
  FSDirectory fsd = Mockito.mock(FSDirectory.class);
  Mockito.doReturn(inode).when(fsd).getInode(Mockito.anyLong());
  // A non-reserved path resolves to itself.
  assertEquals("/test", FSDirectory.resolvePath("/test", null, fsd));
  byte[][] components = INode.getPathComponents(path);
  String resolvedPath = FSDirectory.resolvePath(path, components, fsd);
  assertEquals(path, resolvedPath);
  // /.reserved/.inodes/<id> resolves to the inode's full path.
  components = INode.getPathComponents("/.reserved/.inodes/1");
  resolvedPath = FSDirectory.resolvePath(path, components, fsd);
  assertEquals(path, resolvedPath);
  // A trailing slash must not change the resolution. BUG FIX: the
  // resolvePath call was missing here, so the assertion silently
  // re-checked the stale value from the previous case.
  components = INode.getPathComponents("/.reserved/.inodes/1/");
  resolvedPath = FSDirectory.resolvePath(path, components, fsd);
  assertEquals(path, resolvedPath);
  // Components after the inode id are appended to the resolved path.
  components = INode.getPathComponents("/.reserved/.inodes/1/d/e/f");
  resolvedPath = FSDirectory.resolvePath(path, components, fsd);
  assertEquals("/a/b/c/d/e/f", resolvedPath);
  // The bare reserved prefix resolves to itself.
  String testPath = "/.reserved/.inodes";
  components = INode.getPathComponents(testPath);
  resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
  assertEquals(testPath, resolvedPath);
  // The root inode id resolves to "/".
  testPath = "/.reserved/.inodes/" + INodeId.ROOT_INODE_ID;
  components = INode.getPathComponents(testPath);
  resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
  assertEquals("/", resolvedPath);
  // A lookalike outside /.reserved is left untouched.
  testPath = "/.invalid/.inodes/1";
  components = INode.getPathComponents(testPath);
  resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
  assertEquals(testPath, resolvedPath);
  // An unknown inode id must fail with FileNotFoundException.
  Mockito.doReturn(null).when(fsd).getInode(Mockito.anyLong());
  testPath = "/.reserved/.inodes/1234";
  components = INode.getPathComponents(testPath);
  try {
    String realPath = FSDirectory.resolvePath(testPath, components, fsd);
    fail("Path should not be resolved:" + realPath);
  } catch (IOException e) {
    assertTrue(e instanceof FileNotFoundException);
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test for the static {@link INodeFile#valueOf(INode,String)}and {@link INodeFileUnderConstruction#valueOf(INode,String)} methods.
 * @throws IOException
 */
@Test public void testValueOf() throws IOException {
final String path="/testValueOf";
final short replication=3;
// Case 1: a null inode — both valueOf variants must throw
// FileNotFoundException with their respective messages.
{
final INode from=null;
try {
INodeFile.valueOf(from,path);
fail();
}
catch ( FileNotFoundException fnfe) {
assertTrue(fnfe.getMessage().contains("File does not exist"));
}
try {
INodeDirectory.valueOf(from,path);
fail();
}
catch ( FileNotFoundException e) {
assertTrue(e.getMessage().contains("Directory does not exist"));
}
}
// Case 2: a regular file inode — INodeFile.valueOf returns the same
// object; INodeDirectory.valueOf rejects it.
{
final INode from=createINodeFile(replication,preferredBlockSize);
final INodeFile f=INodeFile.valueOf(from,path);
assertTrue(f == from);
try {
INodeDirectory.valueOf(from,path);
fail();
}
catch ( PathIsNotDirectoryException e) {
// expected: a file is not a directory
}
}
// Case 3: a file under construction behaves like a regular file for both
// valueOf variants.
{
final INode from=new INodeFile(INodeId.GRANDFATHER_INODE_ID,null,perm,0L,0L,null,replication,1024L);
from.asFile().toUnderConstruction("client","machine");
final INodeFile f=INodeFile.valueOf(from,path);
assertTrue(f == from);
try {
INodeDirectory.valueOf(from,path);
fail();
}
catch ( PathIsNotDirectoryException expected) {
// expected: a file under construction is still not a directory
}
}
// Case 4: a directory inode — INodeFile.valueOf rejects it;
// INodeDirectory.valueOf returns the same object.
{
final INode from=new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,null,perm,0L);
try {
INodeFile.valueOf(from,path);
fail();
}
catch ( FileNotFoundException fnfe) {
assertTrue(fnfe.getMessage().contains("Path is not a file"));
}
final INodeDirectory d=INodeDirectory.valueOf(from,path);
assertTrue(d == from);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that getFullPathName() reflects an inode's position as it is
 * attached progressively deeper into the namespace tree.
 */
@Test public void testGetFullPathName(){
  replication = 3;
  preferredBlockSize = 128 * 1024 * 1024;
  INodeFile leaf = createINodeFile(replication, preferredBlockSize);
  leaf.setLocalName(DFSUtil.string2Bytes("f"));
  INodeDirectory rootDir = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, INodeDirectory.ROOT_NAME, perm, 0L);
  INodeDirectory subDir = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, DFSUtil.string2Bytes("d"), perm, 0L);
  // Unattached file: the full path is just its own name.
  assertEquals("f", leaf.getFullPathName());
  // Attached under "d" (itself still detached): relative path "d/f".
  subDir.addChild(leaf);
  assertEquals("d" + Path.SEPARATOR + "f", leaf.getFullPathName());
  // Once "d" hangs off the root, all paths become absolute.
  rootDir.addChild(subDir);
  assertEquals(Path.SEPARATOR + "d" + Path.SEPARATOR + "f", leaf.getFullPathName());
  assertEquals(Path.SEPARATOR + "d", subDir.getFullPathName());
  assertEquals(Path.SEPARATOR, rootDir.getFullPathName());
}
APIUtilityVerifier EqualityVerifier
/**
 * Test for the PreferredBlockSize value. Sets a value and checks if it was
 * set correct.
 */
@Test public void testPreferredBlockSize(){
  replication = 3;
  preferredBlockSize = 128 * 1024 * 1024;
  INodeFile inf = createINodeFile(replication, preferredBlockSize);
  // Message fixed: the old "True has to be returned in this case" text was
  // copy-pasted from a boolean assertion and did not describe this check.
  assertEquals("preferred block size should match the value set", preferredBlockSize, inf.getPreferredBlockSize());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies that dfs.ls.limit (set to 9 here) caps each getListing() batch:
 * with replication 3 only one multi-block file fits per batch, while with
 * replication 1 three files fit per batch.
 */
@Test public void testLocationLimitInListingOps() throws Exception {
  final Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 9);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    // Typed list (was a raw ArrayList) tracking which entries are still
    // expected from the listing.
    List<String> source = new ArrayList<String>();
    hdfs.mkdirs(new Path("/tmp1"));
    hdfs.mkdirs(new Path("/tmp2"));
    source.add("f1");
    source.add("f2");
    int numEntries = source.size();
    for (int j = 0; j < numEntries; j++) {
      DFSTestUtil.createFile(hdfs, new Path("/tmp1/" + source.get(j)), 4096, 3 * 1024 - 100, 1024, (short) 3, 0);
    }
    // With replication 3, each batch carries exactly one entry.
    byte[] start = HdfsFileStatus.EMPTY_NAME;
    for (int j = 0; j < numEntries; j++) {
      DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp1", start, true);
      assertEquals(1, dl.getPartialListing().length);
      for (int i = 0; i < dl.getPartialListing().length; i++) {
        source.remove(dl.getPartialListing()[i].getLocalName());
      }
      start = dl.getLastName();
    }
    // Every entry must have been listed exactly once.
    assertEquals(0, source.size());
    source.add("f1");
    source.add("f2");
    source.add("f3");
    source.add("f4");
    source.add("f5");
    source.add("f6");
    numEntries = source.size();
    for (int j = 0; j < numEntries; j++) {
      DFSTestUtil.createFile(hdfs, new Path("/tmp2/" + source.get(j)), 4096, 3 * 1024 - 100, 1024, (short) 1, 0);
    }
    // With replication 1, each batch carries three entries.
    start = HdfsFileStatus.EMPTY_NAME;
    for (int j = 0; j < numEntries / 3; j++) {
      DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp2", start, true);
      assertEquals(3, dl.getPartialListing().length);
      for (int i = 0; i < dl.getPartialListing().length; i++) {
        source.remove(dl.getPartialListing()[i].getLocalName());
      }
      start = dl.getLastName();
    }
    assertEquals(0, source.size());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier
/**
 * Test whether the inode in inodeMap has been replaced after regular inode
 * replacement
 */
@Test public void testInodeReplacement() throws Exception {
  final Configuration config = new Configuration();
  MiniDFSCluster miniCluster = null;
  try {
    miniCluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    miniCluster.waitActive();
    final DistributedFileSystem dfs = miniCluster.getFileSystem();
    final FSDirectory directory = miniCluster.getNamesystem().getFSDirectory();
    final Path dirPath = new Path("/dir");
    dfs.mkdirs(dirPath);
    // Fresh directory: the inode map entry is the directory inode itself.
    INodeDirectory inode = getDir(directory, dirPath);
    INode mapped = directory.getInode(inode.getId());
    assertSame(inode, mapped);
    // Setting a quota replaces the inode with a quota-carrying variant;
    // the inode map must track the replacement.
    dfs.setQuota(dirPath, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
    inode = getDir(directory, dirPath);
    assertTrue(inode.isWithQuota());
    mapped = directory.getInode(inode.getId());
    assertSame(inode, mapped);
    // Clearing the quota triggers another replacement; the map must
    // still resolve the id to the current inode.
    dfs.setQuota(dirPath, -1, -1);
    inode = getDir(directory, dirPath);
    mapped = directory.getInode(inode.getId());
    assertSame(inode, mapped);
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * FSDirectory#unprotectedSetQuota creates a new INodeDirectoryWithQuota to
 * replace the original INodeDirectory. Before HDFS-4243, the parent field of
 * all the children INodes of the target INodeDirectory is not changed to
 * point to the new INodeDirectoryWithQuota. This testcase tests this
 * scenario.
 */
@Test public void testGetFullPathNameAfterSetQuota() throws Exception {
  long fileLength = 1024;
  replication = 3;
  Configuration config = new Configuration();
  MiniDFSCluster miniCluster = null;
  try {
    miniCluster = new MiniDFSCluster.Builder(config).numDataNodes(replication).build();
    miniCluster.waitActive();
    FSNamesystem namesystem = miniCluster.getNamesystem();
    FSDirectory directory = namesystem.getFSDirectory();
    DistributedFileSystem dfs = miniCluster.getFileSystem();
    final Path dir = new Path("/dir");
    final Path file = new Path(dir, "file");
    DFSTestUtil.createFile(dfs, file, fileLength, replication, 0L);
    // Sanity check before the quota-induced inode replacement.
    INode fileNode = directory.getINode(file.toString());
    assertEquals(file.toString(), fileNode.getFullPathName());
    // Setting a quota swaps /dir's inode for a quota-carrying one; the
    // child file must still resolve its full path through the new parent.
    dfs.setQuota(dir, Long.MAX_VALUE - 1, replication * fileLength * 10);
    INodeDirectory dirNode = getDir(directory, dir);
    assertEquals(dir.toString(), dirNode.getFullPathName());
    assertTrue(dirNode.isWithQuota());
    // Renaming the quota directory must carry the children along.
    final Path newDir = new Path("/newdir");
    final Path newFile = new Path(newDir, "file");
    dfs.rename(dir, newDir, Options.Rename.OVERWRITE);
    fileNode = directory.getINode(newFile.toString());
    assertEquals(newFile.toString(), fileNode.getFullPathName());
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Check that listCorruptFileBlocks works while the namenode is still in safemode.
 */
@Test(timeout=300000) public void testListCorruptFileBlocksInSafeMode() throws Exception {
  MiniDFSCluster cluster = null;
  Random random = new Random();
  try {
    Configuration conf = new HdfsConfiguration();
    // Aggressive scan/report intervals so corruption is detected quickly;
    // the 1.5 safemode threshold is unreachable, keeping the namenode in
    // safemode after the restart below.
    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000);
    conf.setFloat(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1.5f);
    conf.setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY, 0f);
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
    cluster = new MiniDFSCluster.Builder(conf).waitSafeMode(false).build();
    cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
    FileSystem fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().setName("testListCorruptFileBlocksInSafeMode").setNumFiles(2).setMaxLevels(1).setMaxSize(512).build();
    util.createFiles(fs, "/srcdat10");
    // No corruption yet. (Wildcard-bounded collection replaces the raw
    // Collection type.)
    Collection<?> badFiles = cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/", null);
    assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting None.", badFiles.size() == 0);
    // Overwrite the last two bytes of one block's metadata file on disk.
    File storageDir = cluster.getInstanceStorageDir(0, 0);
    File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, cluster.getNamesystem().getBlockPoolId());
    assertTrue("data directory does not exist", data_dir.exists());
    // Typed list (was raw) so get(0) yields a File without a cast.
    List<File> metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
    assertTrue("Data directory does not contain any blocks or there was an " + "IO error", metaFiles != null && !metaFiles.isEmpty());
    File metaFile = metaFiles.get(0);
    RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
    FileChannel channel = file.getChannel();
    long position = channel.size() - 2;
    int length = 2;
    byte[] buffer = new byte[length];
    random.nextBytes(buffer);
    channel.write(ByteBuffer.wrap(buffer), position);
    file.close();
    LOG.info("Deliberately corrupting file " + metaFile.getName() + " at offset "+ position+ " length "+ length);
    // Reading should surface the corruption as a BlockMissingException.
    try {
      util.checkFiles(fs, "/srcdat10");
    } catch (BlockMissingException e) {
      System.out.println("Received BlockMissingException as expected.");
    } catch (IOException e) {
      assertTrue("Corrupted replicas not handled properly. " + "Expecting BlockMissingException " + " but received IOException "+ e, false);
    }
    badFiles = cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/", null);
    LOG.info("Namenode has bad files. " + badFiles.size());
    assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.", badFiles.size() == 1);
    // Restart: the namenode stays in safemode but must still answer
    // listCorruptFileBlocks once replication queues are being populated.
    cluster.restartNameNode(0);
    fs = cluster.getFileSystem();
    while (!cluster.getNameNode().namesystem.isPopulatingReplQueues()) {
      try {
        LOG.info("waiting for replication queues");
        Thread.sleep(1000);
      } catch (InterruptedException ignore) {
        // keep waiting; loop re-checks the condition
      }
    }
    try {
      util.checkFiles(fs, "/srcdat10");
    } catch (BlockMissingException e) {
      System.out.println("Received BlockMissingException as expected.");
    } catch (IOException e) {
      assertTrue("Corrupted replicas not handled properly. " + "Expecting BlockMissingException " + " but received IOException "+ e, false);
    }
    badFiles = cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/", null);
    LOG.info("Namenode has bad files. " + badFiles.size());
    assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.", badFiles.size() == 1);
    assertTrue("Namenode is not in safe mode", cluster.getNameNode().isInSafeMode());
    cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
    util.cleanup(fs, "/srcdat10");
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
    throw e;
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Test if NN.listCorruptFiles() returns the right number of results.
 * The corrupt blocks are detected by the BlockPoolSliceScanner.
 * Also, test that DFS.listCorruptFileBlocks can make multiple successive
 * calls.
 */
@Test(timeout=300000) public void testMaxCorruptFiles() throws Exception {
MiniDFSCluster cluster=null;
try {
Configuration conf=new HdfsConfiguration();
// Frequent block reports so the deleted replicas are noticed quickly.
conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,3 * 1000);
cluster=new MiniDFSCluster.Builder(conf).build();
FileSystem fs=cluster.getFileSystem();
// Create 3x as many files as one listCorruptFileBlocks() call may return,
// so pagination is actually exercised below.
final int maxCorruptFileBlocks=FSNamesystem.DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED;
DFSTestUtil util=new DFSTestUtil.Builder().setName("testMaxCorruptFiles").setNumFiles(maxCorruptFileBlocks * 3).setMaxLevels(1).setMaxSize(512).build();
util.createFiles(fs,"/srcdat2",(short)1);
util.waitReplication(fs,"/srcdat2",(short)1);
final NameNode namenode=cluster.getNameNode();
// Sanity check: nothing is corrupt yet.
Collection badFiles=namenode.getNamesystem().listCorruptFileBlocks("/srcdat2",null);
assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting none.",badFiles.size() == 0);
// Delete every block file and metadata file from all candidate instance
// storage directories, making every file corrupt (replication is 1).
final String bpid=cluster.getNamesystem().getBlockPoolId();
for (int i=0; i < 4; i++) {
for (int j=0; j <= 1; j++) {
File storageDir=cluster.getInstanceStorageDir(i,j);
File data_dir=MiniDFSCluster.getFinalizedDir(storageDir,bpid);
LOG.info("Removing files from " + data_dir);
List metadataFiles=MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
// Directory may not exist for this (i, j) combination.
if (metadataFiles == null) continue;
for ( File metadataFile : metadataFiles) {
File blockFile=Block.metaToBlockFile(metadataFile);
assertTrue("Cannot remove file.",blockFile.delete());
assertTrue("Cannot remove file.",metadataFile.delete());
}
}
}
LOG.info("Restarting Datanode to trigger BlockPoolSliceScanner");
cluster.restartDataNodes();
cluster.waitActive();
// Poll until the namenode reports at least one full page of corrupt files.
badFiles=namenode.getNamesystem().listCorruptFileBlocks("/srcdat2",null);
while (badFiles.size() < maxCorruptFileBlocks) {
LOG.info("# of corrupt files is: " + badFiles.size());
Thread.sleep(10000);
badFiles=namenode.getNamesystem().listCorruptFileBlocks("/srcdat2",null);
}
// A single namesystem call is capped at maxCorruptFileBlocks results.
badFiles=namenode.getNamesystem().listCorruptFileBlocks("/srcdat2",null);
LOG.info("Namenode has bad files. " + badFiles.size());
assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting "+ maxCorruptFileBlocks+ ".",badFiles.size() == maxCorruptFileBlocks);
// The DFS iterator pages through the full set, so it must return more
// than one page's worth of paths and issue more than one call.
CorruptFileBlockIterator iter=(CorruptFileBlockIterator)fs.listCorruptFileBlocks(new Path("/srcdat2"));
int corruptPaths=countPaths(iter);
assertTrue("Expected more than " + maxCorruptFileBlocks + " corrupt file blocks but got "+ corruptPaths,corruptPaths > maxCorruptFileBlocks);
assertTrue("Iterator should have made more than 1 call but made " + iter.getCallsMade(),iter.getCallsMade() > 1);
util.cleanup(fs,"/srcdat2");
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Corrupts all files under /corruptData by deleting their block and metadata
 * files, then verifies FSNamesystem.listCorruptFileBlocks: all 3 files are
 * reported, a cookie resumes the listing mid-way, and a healthy directory
 * reports nothing.
 */
@Test(timeout=300000) public void testlistCorruptFileBlocks() throws Exception {
Configuration conf=new Configuration();
// Frequent block reports and directory scans so missing blocks are
// detected quickly.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000);
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,1);
FileSystem fs=null;
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
fs=cluster.getFileSystem();
DFSTestUtil util=new DFSTestUtil.Builder().setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).setMaxSize(1024).build();
util.createFiles(fs,"/corruptData");
final NameNode namenode=cluster.getNameNode();
// Sanity check: nothing is corrupt yet.
Collection corruptFileBlocks=namenode.getNamesystem().listCorruptFileBlocks("/corruptData",null);
int numCorrupt=corruptFileBlocks.size();
assertTrue(numCorrupt == 0);
// Delete every block file and metadata file from all candidate instance
// storage directories, corrupting all 3 files.
String bpid=cluster.getNamesystem().getBlockPoolId();
for (int i=0; i < 4; i++) {
for (int j=0; j <= 1; j++) {
File storageDir=cluster.getInstanceStorageDir(i,j);
File data_dir=MiniDFSCluster.getFinalizedDir(storageDir,bpid);
List metadataFiles=MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
// Directory may not exist for this (i, j) combination.
if (metadataFiles == null) continue;
for ( File metadataFile : metadataFiles) {
File blockFile=Block.metaToBlockFile(metadataFile);
LOG.info("Deliberately removing file " + blockFile.getName());
assertTrue("Cannot remove file.",blockFile.delete());
LOG.info("Deliberately removing file " + metadataFile.getName());
assertTrue("Cannot remove file.",metadataFile.delete());
}
}
}
// Poll (up to ~30s) until all 3 files are reported corrupt.
int count=0;
corruptFileBlocks=namenode.getNamesystem().listCorruptFileBlocks("/corruptData",null);
numCorrupt=corruptFileBlocks.size();
while (numCorrupt < 3) {
Thread.sleep(1000);
corruptFileBlocks=namenode.getNamesystem().listCorruptFileBlocks("/corruptData",null);
numCorrupt=corruptFileBlocks.size();
count++;
if (count > 30) break;
}
LOG.info("Namenode has bad files. " + numCorrupt);
assertTrue(numCorrupt == 3);
// Resume the listing with cookie "1": the remaining 2 entries come back,
// and the first of them matches the second entry of the full listing.
FSNamesystem.CorruptFileBlockInfo[] cfb=corruptFileBlocks.toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
String[] cookie=new String[]{"1"};
Collection nextCorruptFileBlocks=namenode.getNamesystem().listCorruptFileBlocks("/corruptData",cookie);
FSNamesystem.CorruptFileBlockInfo[] ncfb=nextCorruptFileBlocks.toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
numCorrupt=nextCorruptFileBlocks.size();
assertTrue(numCorrupt == 2);
assertTrue(ncfb[0].block.getBlockName().equalsIgnoreCase(cfb[1].block.getBlockName()));
// The cookie was advanced by the previous call; a further call from the
// updated cookie returns nothing.
corruptFileBlocks=namenode.getNamesystem().listCorruptFileBlocks("/corruptData",cookie);
numCorrupt=corruptFileBlocks.size();
assertTrue(numCorrupt == 0);
// A healthy directory must report no corrupt files.
util.createFiles(fs,"/goodData");
corruptFileBlocks=namenode.getNamesystem().listCorruptFileBlocks("/goodData",null);
numCorrupt=corruptFileBlocks.size();
assertTrue(numCorrupt == 0);
util.cleanup(fs,"/corruptData");
util.cleanup(fs,"/goodData");
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * check if nn.getCorruptFiles() returns a file that has corrupted blocks
 *
 * Corrupts the last two bytes of one block metadata file, triggers a read so
 * the corruption is detected, and verifies exactly one corrupt file is
 * reported by listCorruptFileBlocks.
 */
@Test(timeout=300000) public void testListCorruptFilesCorruptedBlock() throws Exception {
  MiniDFSCluster cluster = null;
  Random random = new Random();
  try {
    Configuration conf = new HdfsConfiguration();
    // Frequent directory scans and block reports so corruption is found fast.
    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000);
    // Shorten the client retry window so checkFiles() fails quickly below.
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
    cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder()
        .setName("testCorruptFilesCorruptedBlock")
        .setNumFiles(2).setMaxLevels(1).setMaxSize(512).build();
    util.createFiles(fs, "/srcdat10");
    final NameNode namenode = cluster.getNameNode();
    // Sanity check: a fresh cluster must not report any corrupt files.
    Collection badFiles = namenode.getNamesystem().listCorruptFileBlocks("/", null);
    assertTrue("Namenode has " + badFiles.size()
        + " corrupt files. Expecting None.", badFiles.size() == 0);
    // Locate one finalized block metadata file.
    String bpid = cluster.getNamesystem().getBlockPoolId();
    File storageDir = cluster.getInstanceStorageDir(0, 1);
    File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
    assertTrue("data directory does not exist", data_dir.exists());
    List metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
    assertTrue("Data directory does not contain any blocks or there was an "
        + "IO error", metaFiles != null && !metaFiles.isEmpty());
    File metaFile = metaFiles.get(0);
    int length = 2;
    // Deliberately corrupt the last two bytes of the metadata file. Close
    // the file in a finally block so the descriptor is not leaked if the
    // write fails (the original code skipped close() on exception).
    RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
    try {
      FileChannel channel = file.getChannel();
      long position = channel.size() - 2;
      byte[] buffer = new byte[length];
      random.nextBytes(buffer);
      channel.write(ByteBuffer.wrap(buffer), position);
      LOG.info("Deliberately corrupting file " + metaFile.getName()
          + " at offset " + position + " length " + length);
    } finally {
      file.close();
    }
    // Reads should hit the corrupt replica and raise BlockMissingException;
    // any other IOException is a test failure.
    try {
      util.checkFiles(fs, "/srcdat10");
    } catch (BlockMissingException e) {
      System.out.println("Received BlockMissingException as expected.");
    } catch (IOException e) {
      assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException "
          + " but received IOException " + e, false);
    }
    // Exactly one corrupt file should now be reported.
    badFiles = namenode.getNamesystem().listCorruptFileBlocks("/", null);
    LOG.info("Namenode has bad files. " + badFiles.size());
    assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.",
        badFiles.size() == 1);
    util.cleanup(fs, "/srcdat10");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * test listCorruptFileBlocks in DistributedFileSystem
 */
@Test(timeout=300000) public void testlistCorruptFileBlocksDFS() throws Exception {
Configuration conf=new Configuration();
// Frequent block reports and directory scans so missing blocks are
// detected quickly.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000);
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,1);
FileSystem fs=null;
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
fs=cluster.getFileSystem();
DistributedFileSystem dfs=(DistributedFileSystem)fs;
DFSTestUtil util=new DFSTestUtil.Builder().setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).setMaxSize(1024).build();
util.createFiles(fs,"/corruptData");
// Sanity check: no corruption reported yet via the DFS-level API.
RemoteIterator corruptFileBlocks=dfs.listCorruptFileBlocks(new Path("/corruptData"));
int numCorrupt=countPaths(corruptFileBlocks);
assertTrue(numCorrupt == 0);
// Delete every block file and metadata file from both storage dirs of
// datanode 0, corrupting all 3 files.
String bpid=cluster.getNamesystem().getBlockPoolId();
for (int i=0; i < 2; i++) {
File storageDir=cluster.getInstanceStorageDir(0,i);
File data_dir=MiniDFSCluster.getFinalizedDir(storageDir,bpid);
List metadataFiles=MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
// Directory may contain no finalized blocks.
if (metadataFiles == null) continue;
for ( File metadataFile : metadataFiles) {
File blockFile=Block.metaToBlockFile(metadataFile);
LOG.info("Deliberately removing file " + blockFile.getName());
assertTrue("Cannot remove file.",blockFile.delete());
LOG.info("Deliberately removing file " + metadataFile.getName());
assertTrue("Cannot remove file.",metadataFile.delete());
}
}
// Poll (up to ~30s) until all 3 files are reported corrupt.
int count=0;
corruptFileBlocks=dfs.listCorruptFileBlocks(new Path("/corruptData"));
numCorrupt=countPaths(corruptFileBlocks);
while (numCorrupt < 3) {
Thread.sleep(1000);
corruptFileBlocks=dfs.listCorruptFileBlocks(new Path("/corruptData"));
numCorrupt=countPaths(corruptFileBlocks);
count++;
if (count > 30) break;
}
LOG.info("Namenode has bad files. " + numCorrupt);
assertTrue(numCorrupt == 3);
util.cleanup(fs,"/corruptData");
// NOTE(review): "/goodData" is never created in this test (unlike
// testlistCorruptFileBlocks); presumably cleanup of a nonexistent
// directory is a harmless no-op -- verify against DFSTestUtil.cleanup.
util.cleanup(fs,"/goodData");
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier BooleanVerifier
/**
 * Tests metasave after delete, to make sure there are no orphaned blocks
 */
@Test public void testMetasaveAfterDelete() throws IOException, InterruptedException {
// Create two files with replication 2.
for (int i=0; i < 2; i++) {
Path file=new Path("/filestatus" + i);
DFSTestUtil.createFile(fileSys,file,1024,1024,blockSize,(short)2,seed);
}
// Stop one datanode and wait long enough for the namenode to mark it dead,
// then make one file under-replicated and delete both files.
cluster.stopDataNode(1);
Thread.sleep(15000);
namesystem.setReplication("/filestatus0",(short)4);
namesystem.delete("/filestatus0",true);
namesystem.delete("/filestatus1",true);
namesystem.metaSave("metasaveAfterDelete.out.txt");
// Verify the metasave dump: since the files were deleted, no blocks may be
// left waiting for or undergoing replication.
BufferedReader reader=null;
try {
FileInputStream fstream=new FileInputStream(getLogFile("metasaveAfterDelete.out.txt"));
DataInputStream in=new DataInputStream(fstream);
reader=new BufferedReader(new InputStreamReader(in));
// Skip the summary header line.
reader.readLine();
String line=reader.readLine();
assertTrue(line.equals("Live Datanodes: 1"));
line=reader.readLine();
assertTrue(line.equals("Dead Datanodes: 1"));
line=reader.readLine();
assertTrue(line.equals("Metasave: Blocks waiting for replication: 0"));
line=reader.readLine();
assertTrue(line.equals("Mis-replicated blocks that have been postponed:"));
line=reader.readLine();
assertTrue(line.equals("Metasave: Blocks being replicated: 0"));
}
finally {
// Closing the reader also closes the wrapped input streams.
if (reader != null) reader.close();
}
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests metasave
 *
 * Verifies the header of the metasave dump after one datanode has died and
 * one file has been made under-replicated.
 */
@Test public void testMetaSave() throws IOException, InterruptedException {
  // Create two files with replication 2.
  for (int i = 0; i < 2; i++) {
    Path file = new Path("/filestatus" + i);
    DFSTestUtil.createFile(fileSys, file, 1024, 1024, blockSize, (short) 2, seed);
  }
  // Stop one datanode and wait long enough for the namenode to mark it
  // dead, then raise one file's replication so a block waits for replication.
  cluster.stopDataNode(1);
  Thread.sleep(15000);
  namesystem.setReplication("/filestatus0", (short) 4);
  namesystem.metaSave("metasave.out.txt");
  // Verify the first lines of the dump. The stream is opened before the
  // try so the finally block can always close it; the redundant
  // DataInputStream wrapper of the original code is dropped (only text
  // lines are read here).
  FileInputStream fstream = new FileInputStream(getLogFile("metasave.out.txt"));
  BufferedReader reader = null;
  try {
    reader = new BufferedReader(new InputStreamReader(fstream));
    String line = reader.readLine();
    Assert.assertEquals("3 files and directories, 2 blocks = 5 total filesystem objects", line);
    line = reader.readLine();
    assertTrue(line.equals("Live Datanodes: 1"));
    line = reader.readLine();
    assertTrue(line.equals("Dead Datanodes: 1"));
    // Skip the "waiting for replication" header, then check the entry for
    // one of the two files.
    line = reader.readLine();
    line = reader.readLine();
    assertTrue(line.matches("^/filestatus[01]:.*"));
  } finally {
    // Closing the reader closes the wrapped stream; if the reader was never
    // constructed, close the stream directly so it cannot leak.
    if (reader != null) {
      reader.close();
    } else {
      fstream.close();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Cross-checks every attribute exposed by the NameNodeInfo MXBean against the
 * corresponding FSNamesystem getter, and exercises the NameDirStatuses
 * attribute by deliberately failing one name directory.
 */
@SuppressWarnings({"unchecked"}) @Test public void testNameNodeMXBeanInfo() throws Exception {
Configuration conf=new Configuration();
conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
// Fast heartbeats and rechecks so the stopped datanode is declared dead
// quickly.
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
FSNamesystem fsn=cluster.getNameNode().namesystem;
// Look the MXBean up via the platform MBean server, exactly as an
// external JMX client would.
MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
// Write an include file listing all datanodes, refresh the node list,
// then stop one datanode so both live and dead nodes exist.
FileSystem localFileSys=FileSystem.getLocal(conf);
Path workingDir=localFileSys.getWorkingDirectory();
Path dir=new Path(workingDir,"build/test/data/temp/TestNameNodeMXBean");
Path includeFile=new Path(dir,"include");
assertTrue(localFileSys.mkdirs(dir));
StringBuilder includeHosts=new StringBuilder();
for ( DataNode dn : cluster.getDataNodes()) {
includeHosts.append(dn.getDisplayName()).append("\n");
}
DFSTestUtil.writeFile(localFileSys,includeFile,includeHosts.toString());
conf.set(DFSConfigKeys.DFS_HOSTS,includeFile.toUri().getPath());
fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
cluster.stopDataNode(0);
// Wait until the namenode has noticed the stopped datanode.
while (fsn.getNumDatanodesInService() != 2) {
try {
Thread.sleep(1000);
}
catch ( InterruptedException e) {
}
}
// Each JMX attribute must equal the value of its FSNamesystem getter.
String clusterId=(String)mbs.getAttribute(mxbeanName,"ClusterId");
assertEquals(fsn.getClusterId(),clusterId);
String blockpoolId=(String)mbs.getAttribute(mxbeanName,"BlockPoolId");
assertEquals(fsn.getBlockPoolId(),blockpoolId);
String version=(String)mbs.getAttribute(mxbeanName,"Version");
assertEquals(fsn.getVersion(),version);
assertTrue(version.equals(VersionInfo.getVersion() + ", r" + VersionInfo.getRevision()));
Long used=(Long)mbs.getAttribute(mxbeanName,"Used");
assertEquals(fsn.getUsed(),used.longValue());
Long total=(Long)mbs.getAttribute(mxbeanName,"Total");
assertEquals(fsn.getTotal(),total.longValue());
String safemode=(String)mbs.getAttribute(mxbeanName,"Safemode");
assertEquals(fsn.getSafemode(),safemode);
Long nondfs=(Long)(mbs.getAttribute(mxbeanName,"NonDfsUsedSpace"));
assertEquals(fsn.getNonDfsUsedSpace(),nondfs.longValue());
Float percentremaining=(Float)(mbs.getAttribute(mxbeanName,"PercentRemaining"));
assertEquals(fsn.getPercentRemaining(),percentremaining.floatValue(),DELTA);
Long totalblocks=(Long)(mbs.getAttribute(mxbeanName,"TotalBlocks"));
assertEquals(fsn.getTotalBlocks(),totalblocks.longValue());
// LiveNodes is a JSON map of per-node stats; spot-check each node's
// fields (no blocks were written, hence numBlocks == 0).
String alivenodeinfo=(String)(mbs.getAttribute(mxbeanName,"LiveNodes"));
Map> liveNodes=(Map>)JSON.parse(alivenodeinfo);
assertTrue(liveNodes.size() > 0);
for ( Map liveNode : liveNodes.values()) {
assertTrue(liveNode.containsKey("nonDfsUsedSpace"));
assertTrue(((Long)liveNode.get("nonDfsUsedSpace")) > 0);
assertTrue(liveNode.containsKey("capacity"));
assertTrue(((Long)liveNode.get("capacity")) > 0);
assertTrue(liveNode.containsKey("numBlocks"));
assertTrue(((Long)liveNode.get("numBlocks")) == 0);
}
assertEquals(fsn.getLiveNodes(),alivenodeinfo);
// DeadNodes must be non-empty (we stopped a datanode above) and carry
// the expected per-node fields.
String deadnodeinfo=(String)(mbs.getAttribute(mxbeanName,"DeadNodes"));
assertEquals(fsn.getDeadNodes(),deadnodeinfo);
Map> deadNodes=(Map>)JSON.parse(deadnodeinfo);
assertTrue(deadNodes.size() > 0);
for ( Map deadNode : deadNodes.values()) {
assertTrue(deadNode.containsKey("lastContact"));
assertTrue(deadNode.containsKey("decommissioned"));
assertTrue(deadNode.containsKey("xferaddr"));
}
String nodeUsage=(String)(mbs.getAttribute(mxbeanName,"NodeUsage"));
assertEquals("Bad value for NodeUsage",fsn.getNodeUsage(),nodeUsage);
String nameJournalStatus=(String)(mbs.getAttribute(mxbeanName,"NameJournalStatus"));
assertEquals("Bad value for NameJournalStatus",fsn.getNameJournalStatus(),nameJournalStatus);
String journalTxnInfo=(String)mbs.getAttribute(mxbeanName,"JournalTransactionInfo");
assertEquals("Bad value for NameTxnIds",fsn.getJournalTransactionInfo(),journalTxnInfo);
String nnStarted=(String)mbs.getAttribute(mxbeanName,"NNStarted");
assertEquals("Bad value for NNStarted",fsn.getNNStarted(),nnStarted);
String compileInfo=(String)mbs.getAttribute(mxbeanName,"CompileInfo");
assertEquals("Bad value for CompileInfo",fsn.getCompileInfo(),compileInfo);
String corruptFiles=(String)(mbs.getAttribute(mxbeanName,"CorruptFiles"));
assertEquals("Bad value for CorruptFiles",fsn.getCorruptFiles(),corruptFiles);
// NameDirStatuses: initially both name dirs are "active", none "failed".
String nameDirStatuses=(String)(mbs.getAttribute(mxbeanName,"NameDirStatuses"));
assertEquals(fsn.getNameDirStatuses(),nameDirStatuses);
Map> statusMap=(Map>)JSON.parse(nameDirStatuses);
Collection nameDirUris=cluster.getNameDirs(0);
for ( URI nameDirUri : nameDirUris) {
File nameDir=new File(nameDirUri);
System.out.println("Checking for the presence of " + nameDir + " in active name dirs.");
assertTrue(statusMap.get("active").containsKey(nameDir.getAbsolutePath()));
}
assertEquals(2,statusMap.get("active").size());
assertEquals(0,statusMap.get("failed").size());
// Make one name dir unwritable (mode 000); after the next edit-log roll
// it must move from "active" to "failed" in the MXBean output.
File failedNameDir=new File(nameDirUris.iterator().next());
assertEquals(0,FileUtil.chmod(new File(failedNameDir,"current").getAbsolutePath(),"000"));
cluster.getNameNodeRpc().rollEditLog();
nameDirStatuses=(String)(mbs.getAttribute(mxbeanName,"NameDirStatuses"));
statusMap=(Map>)JSON.parse(nameDirStatuses);
for ( URI nameDirUri : nameDirUris) {
File nameDir=new File(nameDirUri);
String expectedStatus=nameDir.equals(failedNameDir) ? "failed" : "active";
System.out.println("Checking for the presence of " + nameDir + " in "+ expectedStatus+ " name dirs.");
assertTrue(statusMap.get(expectedStatus).containsKey(nameDir.getAbsolutePath()));
}
assertEquals(1,statusMap.get("active").size());
assertEquals(1,statusMap.get("failed").size());
// Cache gauges: nothing cached; capacity = per-node memlock limit times
// the number of datanodes (set at the top of the test).
assertEquals(0L,mbs.getAttribute(mxbeanName,"CacheUsed"));
assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() * cluster.getDataNodes().size(),mbs.getAttribute(mxbeanName,"CacheCapacity"));
}
finally {
if (cluster != null) {
// Restore permissions on the name dirs so shutdown and later cleanup
// are not blocked by the chmod 000 above.
for ( URI dir : cluster.getNameDirs(0)) {
FileUtil.chmod(new File(new File(dir),"current").getAbsolutePath(),"755");
}
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier ConditionMatcher
/**
 * Verifies that the service RPC server honours
 * DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY: without the key the server binds
 * to the configured service RPC address, and with the key set to the
 * wildcard address it binds to the wildcard.
 */
@Test(timeout=300000) public void testServiceRpcBindHostKey() throws IOException {
  Configuration config = new HdfsConfiguration();
  MiniDFSCluster dfsCluster = null;
  LOG.info("Testing without " + DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);
  config.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
  // Pass 1: no bind-host override => must not bind to the wildcard.
  try {
    dfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
    dfsCluster.waitActive();
    String boundAddress = getServiceRpcServerAddress(dfsCluster);
    assertThat("Bind address not expected to be wildcard by default.",
        boundAddress, not("/" + WILDCARD_ADDRESS));
  } finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
      dfsCluster = null;
    }
  }
  LOG.info("Testing with " + DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);
  // Pass 2: explicit wildcard bind host => must bind to the wildcard.
  config.set(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);
  try {
    dfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
    dfsCluster.waitActive();
    String boundAddress = getServiceRpcServerAddress(dfsCluster);
    assertThat("Bind address " + boundAddress + " is not wildcard.",
        boundAddress, is("/" + WILDCARD_ADDRESS));
  } finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier ConditionMatcher
/**
 * Verifies that the client RPC server honours DFS_NAMENODE_RPC_BIND_HOST_KEY:
 * without the key the server does not bind to the wildcard address, and with
 * the key set to the wildcard address it does.
 */
@Test(timeout=300000) public void testRpcBindHostKey() throws IOException {
  Configuration config = new HdfsConfiguration();
  MiniDFSCluster dfsCluster = null;
  LOG.info("Testing without " + DFS_NAMENODE_RPC_BIND_HOST_KEY);
  // Pass 1: no bind-host override => must not bind to the wildcard.
  try {
    dfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
    dfsCluster.waitActive();
    String boundAddress = getRpcServerAddress(dfsCluster);
    assertThat("Bind address not expected to be wildcard by default.",
        boundAddress, not("/" + WILDCARD_ADDRESS));
  } finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
      dfsCluster = null;
    }
  }
  LOG.info("Testing with " + DFS_NAMENODE_RPC_BIND_HOST_KEY);
  // Pass 2: explicit wildcard bind host => must bind to the wildcard.
  config.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);
  try {
    dfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
    dfsCluster.waitActive();
    String boundAddress = getRpcServerAddress(dfsCluster);
    assertThat("Bind address " + boundAddress + " is not wildcard.",
        boundAddress, is("/" + WILDCARD_ADDRESS));
  } finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * Verifies that xattr operations applied through a symlink affect the link
 * target, and that xattrs set or removed on the target are visible through
 * the link.
 */
@Test(timeout=120000) public void testXAttrSymlinks() throws Exception {
  fs.mkdirs(linkParent);
  fs.mkdirs(targetParent);
  // Use an uppercase L suffix: a lowercase 'l' is easily misread as '1'.
  DFSTestUtil.createFile(fs, target, 1024, (short) 3, 0xBEEFL);
  fs.createSymlink(target, link, false);
  // xattrs set on the target must be readable through the link.
  // JUnit convention is assertEquals(expected, actual); the original had
  // them reversed, which produced misleading failure messages.
  fs.setXAttr(target, name1, value1);
  fs.setXAttr(target, name2, value2);
  Map xattrs = fs.getXAttrs(link);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(value2, xattrs.get(name2));
  // Setting through the link (null value => empty bytes) must land on the
  // target.
  fs.setXAttr(link, name3, null);
  xattrs = fs.getXAttrs(target);
  Assert.assertEquals(3, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(value2, xattrs.get(name2));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
  // Removal through the link must also affect the target.
  fs.removeXAttr(link, name1);
  xattrs = fs.getXAttrs(target);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value2, xattrs.get(name2));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
  // And removal on the target must be visible through the link.
  fs.removeXAttr(target, name3);
  xattrs = fs.getXAttrs(link);
  Assert.assertEquals(1, xattrs.size());
  Assert.assertArrayEquals(value2, xattrs.get(name2));
  fs.delete(linkParent, true);
  fs.delete(targetParent, true);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies FSNamesystem's live/in-service datanode counts, total load, and
 * in-service xceiver average as datanodes are killed, restarted,
 * decommissioned, and finally all shut down.
 */
@Test public void testXceiverCount() throws Exception {
Configuration conf=new HdfsConfiguration();
// Fail fast instead of retrying block allocation while nodes are down.
conf.setInt(DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY,0);
MiniDFSCluster cluster=null;
final int nodes=8;
final int fileCount=5;
final short fileRepl=3;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(nodes).build();
cluster.waitActive();
final FSNamesystem namesystem=cluster.getNamesystem();
final DatanodeManager dnm=namesystem.getBlockManager().getDatanodeManager();
List datanodes=cluster.getDataNodes();
final DistributedFileSystem fs=cluster.getFileSystem();
triggerHeartbeats(datanodes);
// Baseline: all nodes live and in service. The asserts below imply each
// idle datanode contributes a load of 1, so total load == node count.
int expectedTotalLoad=nodes;
int expectedInServiceNodes=nodes;
int expectedInServiceLoad=nodes;
assertEquals(nodes,namesystem.getNumLiveDataNodes());
assertEquals(expectedInServiceNodes,namesystem.getNumDatanodesInService());
assertEquals(expectedTotalLoad,namesystem.getTotalLoad());
assertEquals((double)expectedInServiceLoad / expectedInServiceLoad,namesystem.getInServiceXceiverAverage(),EPSILON);
// Kill half the nodes; live and in-service counts drop together.
for (int i=0; i < nodes / 2; i++) {
DataNode dn=datanodes.get(i);
DatanodeDescriptor dnd=dnm.getDatanode(dn.getDatanodeId());
dn.shutdown();
// Zero the last-update time so the heartbeat check declares the node
// dead immediately.
dnd.setLastUpdate(0L);
BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
expectedInServiceNodes--;
assertEquals(expectedInServiceNodes,namesystem.getNumLiveDataNodes());
assertEquals(expectedInServiceNodes,namesystem.getNumDatanodesInService());
}
// Bring everything back and re-verify the baseline.
cluster.restartDataNodes();
cluster.waitActive();
datanodes=cluster.getDataNodes();
expectedInServiceNodes=nodes;
assertEquals(nodes,datanodes.size());
assertEquals(nodes,namesystem.getNumLiveDataNodes());
assertEquals(expectedInServiceNodes,namesystem.getNumDatanodesInService());
assertEquals(expectedTotalLoad,namesystem.getTotalLoad());
assertEquals((double)expectedInServiceLoad / expectedInServiceLoad,namesystem.getInServiceXceiverAverage(),EPSILON);
// Open fileCount write pipelines; per the bookkeeping below each open
// pipeline adds 2 xceivers per replica to the load.
DFSOutputStream[] streams=new DFSOutputStream[fileCount];
for (int i=0; i < fileCount; i++) {
streams[i]=(DFSOutputStream)fs.create(new Path("/f" + i),fileRepl).getWrappedStream();
streams[i].write("1".getBytes());
streams[i].hsync();
expectedTotalLoad+=2 * fileRepl;
expectedInServiceLoad+=2 * fileRepl;
}
triggerHeartbeats(datanodes);
assertEquals(nodes,namesystem.getNumLiveDataNodes());
assertEquals(expectedInServiceNodes,namesystem.getNumDatanodesInService());
assertEquals(expectedTotalLoad,namesystem.getTotalLoad());
assertEquals((double)expectedInServiceLoad / expectedInServiceNodes,namesystem.getInServiceXceiverAverage(),EPSILON);
// Decommission fileRepl nodes: they stay live and keep counting toward
// total load, but leave the in-service node count and load.
for (int i=0; i < fileRepl; i++) {
expectedInServiceNodes--;
DatanodeDescriptor dnd=dnm.getDatanode(datanodes.get(i).getDatanodeId());
expectedInServiceLoad-=dnd.getXceiverCount();
dnm.startDecommission(dnd);
DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
Thread.sleep(100);
assertEquals(nodes,namesystem.getNumLiveDataNodes());
assertEquals(expectedInServiceNodes,namesystem.getNumDatanodesInService());
assertEquals(expectedTotalLoad,namesystem.getTotalLoad());
assertEquals((double)expectedInServiceLoad / expectedInServiceNodes,namesystem.getInServiceXceiverAverage(),EPSILON);
}
// Close the streams. Each pipeline node gives back 2 xceivers; only
// non-decommissioning nodes affect the in-service load. A close may
// legitimately fail when most of its pipeline is decommissioning.
for (int i=0; i < fileCount; i++) {
int decomm=0;
for ( DatanodeInfo dni : streams[i].getPipeline()) {
DatanodeDescriptor dnd=dnm.getDatanode(dni);
expectedTotalLoad-=2;
if (dnd.isDecommissionInProgress() || dnd.isDecommissioned()) {
decomm++;
}
else {
expectedInServiceLoad-=2;
}
}
try {
streams[i].close();
}
catch ( IOException ioe) {
// Only tolerate the failure if the whole pipeline was decommissioning.
if (decomm < fileRepl) {
throw ioe;
}
}
triggerHeartbeats(datanodes);
assertEquals(nodes,namesystem.getNumLiveDataNodes());
assertEquals(expectedInServiceNodes,namesystem.getNumDatanodesInService());
assertEquals(expectedTotalLoad,namesystem.getTotalLoad());
assertEquals((double)expectedInServiceLoad / expectedInServiceNodes,namesystem.getInServiceXceiverAverage(),EPSILON);
}
// Shut nodes down one by one. The first fileRepl nodes were already
// decommissioned, so the in-service count only drops for the others.
for (int i=0; i < nodes; i++) {
DataNode dn=datanodes.get(i);
dn.shutdown();
DatanodeDescriptor dnDesc=dnm.getDatanode(dn.getDatanodeId());
dnDesc.setLastUpdate(0L);
BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
assertEquals(nodes - 1 - i,namesystem.getNumLiveDataNodes());
if (i >= fileRepl) {
expectedInServiceNodes--;
}
assertEquals(expectedInServiceNodes,namesystem.getNumDatanodesInService());
// Average is 1.0 while any in-service node remains, 0.0 once none do.
double expectedXceiverAvg=(i == nodes - 1) ? 0.0 : 1.0;
assertEquals((double)expectedXceiverAvg,namesystem.getInServiceXceiverAverage(),EPSILON);
}
// Empty cluster: all gauges return to zero.
assertEquals(0,namesystem.getNumLiveDataNodes());
assertEquals(0,namesystem.getNumDatanodesInService());
assertEquals(0.0,namesystem.getTotalLoad(),EPSILON);
assertEquals(0.0,namesystem.getInServiceXceiverAverage(),EPSILON);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * After run a set of operations, restart NN and check if the retry cache has
 * been rebuilt based on the editlog.
 */
@Test public void testRetryCacheRebuild() throws Exception {
// Run the standard set of namespace operations; the cacheable ones leave
// entries in the namenode retry cache (23 expected here).
DFSTestUtil.runOperations(cluster,filesystem,conf,BlockSize,0);
// NOTE(review): raw LightWeightCache/HashMap/Iterator types mirror the
// file's existing style; parameterizing them needs the cache's generic
// arguments -- verify before changing.
LightWeightCache cacheSet=(LightWeightCache)namesystem.getRetryCache().getCacheSet();
assertEquals(23,cacheSet.size());
// Snapshot the current entries (keyed by themselves) for later lookup.
Map oldEntries=new HashMap();
Iterator iter=cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry=iter.next();
oldEntries.put(entry,entry);
}
// Restart: the retry cache must be rebuilt from the edit log.
cluster.restartNameNode();
cluster.waitActive();
namesystem=cluster.getNamesystem();
assertTrue(namesystem.hasRetryCache());
cacheSet=(LightWeightCache)namesystem.getRetryCache().getCacheSet();
assertEquals(23,cacheSet.size());
// Every rebuilt entry must match one captured before the restart.
iter=cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry=iter.next();
assertTrue(oldEntries.containsKey(entry));
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * check if DFS remains in proper condition after a restart
 *
 * Records metadata (mtime, owner, group) of "/" and "/srcdat", mutates the
 * owners, restarts the cluster without formatting, and verifies both file
 * data and namespace metadata survived. Finally checks that modifying the
 * namespace changes the saved fsimage.
 */
@Test public void testRestartDFS() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FSNamesystem fsn = null;
  int numNamenodeDirs;
  DFSTestUtil files = new DFSTestUtil.Builder().setName("TestRestartDFS").setNumFiles(200).build();
  final String dir = "/srcdat";
  final Path rootpath = new Path("/");
  final Path dirpath = new Path(dir);
  long rootmtime;
  FileStatus rootstatus;
  FileStatus dirstatus;
  try {
    cluster = new MiniDFSCluster.Builder(conf).format(true).numDataNodes(NUM_DATANODES).build();
    String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new String[]{});
    numNamenodeDirs = nameNodeDirs.length;
    assertTrue("failed to get number of Namenode StorageDirs", numNamenodeDirs != 0);
    FileSystem fs = cluster.getFileSystem();
    files.createFiles(fs, dir);
    // Record pre-restart state of "/" and "/srcdat".
    rootmtime = fs.getFileStatus(rootpath).getModificationTime();
    // BUGFIX: rootstatus previously queried dirpath, so the root
    // owner/group assertions after restart compared against the wrong
    // inode's status.
    rootstatus = fs.getFileStatus(rootpath);
    dirstatus = fs.getFileStatus(dirpath);
    // Mutate metadata so the restart has namespace changes to preserve.
    fs.setOwner(rootpath, rootstatus.getOwner() + "_XXX", null);
    fs.setOwner(dirpath, null, dirstatus.getGroup() + "_XXX");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
  try {
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
    // Restart WITHOUT formatting: everything recorded above must survive.
    cluster = new MiniDFSCluster.Builder(conf).format(false).numDataNodes(NUM_DATANODES).build();
    fsn = cluster.getNamesystem();
    FileSystem fs = cluster.getFileSystem();
    assertTrue("Filesystem corrupted after restart.", files.checkFiles(fs, dir));
    final FileStatus newrootstatus = fs.getFileStatus(rootpath);
    assertEquals(rootmtime, newrootstatus.getModificationTime());
    assertEquals(rootstatus.getOwner() + "_XXX", newrootstatus.getOwner());
    assertEquals(rootstatus.getGroup(), newrootstatus.getGroup());
    final FileStatus newdirstatus = fs.getFileStatus(dirpath);
    assertEquals(dirstatus.getOwner(), newdirstatus.getOwner());
    assertEquals(dirstatus.getGroup() + "_XXX", newdirstatus.getGroup());
    rootmtime = fs.getFileStatus(rootpath).getModificationTime();
    // Modifying the namespace and saving it must change the fsimage digest.
    final String checkAfterRestart = checkImages(fsn, numNamenodeDirs);
    files.cleanup(fs, dir);
    files.createFiles(fs, dir);
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    cluster.getNameNodeRpc().saveNamespace();
    final String checkAfterModify = checkImages(fsn, numNamenodeDirs);
    assertFalse("Modified namespace should change fsimage contents. "
        + "was: " + checkAfterRestart + " now: " + checkAfterModify,
        checkAfterRestart.equals(checkAfterModify));
    fsn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    files.cleanup(fs, dir);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * The corrupt block has to be removed when the number of valid replicas
 * matches the replication factor of the file. Here that condition is
 * reached by lowering the replication factor.
 *
 * Strategy:
 * Bring up a cluster with 3 datanodes and write a file with replication 3.
 * Corrupt one replica of one of its blocks, and verify 2 good replicas plus
 * 1 corrupt replica remain (the corrupt one is kept because 2 good
 * replicas is less than the required 3).
 * Lower the replication factor to 2 and verify the corrupt replica is
 * purged (2 good replicas now equals the required 2).
 */
@Test public void testWhenDecreasingReplication() throws Exception {
  Configuration config = new HdfsConfiguration();
  config.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  config.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
  MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(3).build();
  FileSystem hdfs = dfsCluster.getFileSystem();
  final FSNamesystem fsn = dfsCluster.getNamesystem();
  try {
    final Path filePath = new Path("/foo1");
    DFSTestUtil.createFile(hdfs, filePath, 2, (short) 3, 0L);
    DFSTestUtil.waitReplication(hdfs, filePath, (short) 3);
    ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(hdfs, filePath);
    // Corrupt one of the three replicas and wait for the namenode to see
    // only two healthy ones.
    corruptBlock(dfsCluster, hdfs, filePath, 0, firstBlock);
    DFSTestUtil.waitReplication(hdfs, filePath, (short) 2);
    // 2 good replicas < replication factor 3: the corrupt one must remain.
    assertEquals(2, countReplicas(fsn, firstBlock).liveReplicas());
    assertEquals(1, countReplicas(fsn, firstBlock).corruptReplicas());
    // Lower the replication factor to 2 and give the namenode time to act.
    fsn.setReplication(filePath.toString(), (short) 2);
    try {
      Thread.sleep(3000);
    } catch (InterruptedException ignored) {
    }
    // 2 good replicas == factor 2: the corrupt replica must be purged.
    assertEquals(2, countReplicas(fsn, firstBlock).liveReplicas());
    assertEquals(0, countReplicas(fsn, firstBlock).corruptReplicas());
  } finally {
    dfsCluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * None of the blocks can be removed if all blocks are corrupt — removing
 * them would lose the only remaining copies of the data.
 * The test strategy :
 *   Bring up Cluster with 3 DataNodes
 *   Create a file of replication factor 3
 *   Corrupt all three replicas
 *   Verify that all replicas are corrupt and 3 replicas are present.
 *   Set the replication factor to 1
 *   Verify that all replicas are corrupt and 3 replicas are present.
 */
@Test public void testWithAllCorruptReplicas() throws Exception {
  Configuration conf = new HdfsConfiguration();
  // Frequent block reports so corruption is reported quickly.
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fs = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();
  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 3);
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    corruptBlock(cluster, fs, fileName, 0, block);
    corruptBlock(cluster, fs, fileName, 1, block);
    corruptBlock(cluster, fs, fileName, 2, block);
    // Wait for the corruption reports to reach the namenode.
    try {
      Thread.sleep(3000);
    } catch (InterruptedException ignored) {
      // Fix: restore the interrupt status instead of swallowing it.
      Thread.currentThread().interrupt();
    }
    assertEquals(0, countReplicas(namesystem, block).liveReplicas());
    assertEquals(3, countReplicas(namesystem, block).corruptReplicas());
    namesystem.setReplication(fileName.toString(), (short) 1);
    try {
      Thread.sleep(3000);
    } catch (InterruptedException ignored) {
      Thread.currentThread().interrupt();
    }
    // Even at replication factor 1 there is no good replica, so all three
    // corrupt copies must be retained.
    assertEquals(0, countReplicas(namesystem, block).liveReplicas());
    assertEquals(3, countReplicas(namesystem, block).corruptReplicas());
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A corrupt replica must be deleted once the number of healthy replicas
 * matches the file's replication factor. Here the extra healthy replica
 * comes from re-replication onto a restarted DataNode.
 * The test strategy :
 *   start a 4-DN cluster but stop the 4th DN immediately
 *   write a file with replication factor 3 and corrupt one replica
 *   verify 2 live + 1 corrupt replica (corrupt one is kept, since
 *     2 good replicas is less than replication factor 3)
 *   restart the 4th DN so a third healthy replica can be created
 *   verify 3 live + 0 corrupt replicas
 */
@Test public void testByAddingAnExtraDataNode() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  FileSystem dfs = cluster.getFileSystem();
  final FSNamesystem ns = cluster.getNamesystem();
  // Park the 4th DataNode so it can be brought online later.
  DataNodeProperties stoppedDn = cluster.stopDataNode(3);
  try {
    final Path testFile = new Path("/foo1");
    DFSTestUtil.createFile(dfs, testFile, 2, (short) 3, 0L);
    DFSTestUtil.waitReplication(dfs, testFile, (short) 3);
    ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(dfs, testFile);
    corruptBlock(cluster, dfs, testFile, 0, firstBlock);
    DFSTestUtil.waitReplication(dfs, testFile, (short) 2);
    assertEquals(2, countReplicas(ns, firstBlock).liveReplicas());
    assertEquals(1, countReplicas(ns, firstBlock).corruptReplicas());
    // Bring the spare DataNode back; re-replication creates a third good
    // copy, after which the corrupt replica should be removed.
    cluster.restartDataNode(stoppedDn);
    DFSTestUtil.waitReplication(dfs, testFile, (short) 3);
    assertEquals(3, countReplicas(ns, firstBlock).liveReplicas());
    assertEquals(0, countReplicas(ns, firstBlock).corruptReplicas());
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * The corrupt block has to be removed when the number of valid replicas
 * matches replication factor for the file. The above condition should hold
 * true as long as there is one good replica. This test verifies that.
 * The test strategy :
 *   Bring up Cluster with 2 DataNodes
 *   Create a file of replication factor 2
 *   Corrupt one replica of a block of the file
 *   Verify that there is one good replica and 1 corrupt replica
 *     (corrupt replica should not be removed since number of good
 *      replicas (1) is less than replication factor (2)).
 *   Set the replication factor to 1
 *   Verify that the corrupt replica is removed.
 *     (corrupt replica should be removed since number of good
 *      replicas (1) is equal to replication factor (1))
 */
@Test(timeout=20000) public void testWithReplicationFactorAsOne() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  FileSystem fs = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();
  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short) 2, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 2);
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    corruptBlock(cluster, fs, fileName, 0, block);
    DFSTestUtil.waitReplication(fs, fileName, (short) 1);
    assertEquals(1, countReplicas(namesystem, block).liveReplicas());
    assertEquals(1, countReplicas(namesystem, block).corruptReplicas());
    namesystem.setReplication(fileName.toString(), (short) 1);
    // Poll for up to 10 seconds for the corrupt replica to be removed.
    for (int i = 0; i < 10; i++) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {
        // Fix: restore the interrupt status instead of swallowing it.
        Thread.currentThread().interrupt();
      }
      if (countReplicas(namesystem, block).corruptReplicas() == 0) {
        break;
      }
    }
    assertEquals(1, countReplicas(namesystem, block).liveReplicas());
    assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verify that a saveNamespace command brings faulty directories
 * in fs.name.dir and fs.edit.dir back online.
 */
@Test(timeout=30000) public void testReinsertnamedirsInSavenamespace() throws Exception {
  Configuration conf = getConf();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
  FSImage originalImage = fsn.getFSImage();
  NNStorage storage = originalImage.getStorage();
  FSImage spyImage = spy(originalImage);
  Whitebox.setInternalState(fsn, "fsImage", spyImage);
  FileSystem fs = FileSystem.getLocal(conf);
  File rootDir = storage.getStorageDir(0).getRoot();
  Path rootPath = new Path(rootDir.getPath(), "current");
  final FsPermission permissionNone = new FsPermission((short) 0);
  final FsPermission permissionAll = new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE);
  // Make the first storage directory unwritable so the save fails there.
  fs.setPermission(rootPath, permissionNone);
  try {
    doAnEdit(fsn, 1);
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    LOG.info("Doing the first savenamespace.");
    fsn.saveNamespace();
    LOG.info("First savenamespace sucessful.");
    // Fix: use assertEquals instead of assertTrue(msg, x == n) so a
    // failure message reports the actual count automatically.
    assertEquals("Savenamespace should have marked one directory as bad.", 1, storage.getRemovedStorageDirs().size());
    fs.setPermission(rootPath, permissionAll);
    LOG.info("Doing the second savenamespace.");
    fsn.saveNamespace();
    LOG.warn("Second savenamespace sucessful.");
    assertEquals("Savenamespace should have been successful in removing bad directories from Image.", 0, storage.getRemovedStorageDirs().size());
    LOG.info("Shutting down fsimage.");
    originalImage.close();
    fsn.close();
    fsn = null;
    LOG.info("Loading new FSmage from disk.");
    fsn = FSNamesystem.loadFromDisk(conf);
    LOG.info("Checking reloaded image.");
    checkEditExists(fsn, 1);
    LOG.info("Reloaded image is good.");
  } finally {
    // Restore permissions so cleanup / later tests can touch the dir.
    if (rootDir.exists()) {
      fs.setPermission(rootPath, permissionAll);
    }
    if (fsn != null) {
      try {
        fsn.close();
      } catch (Throwable t) {
        LOG.fatal("Failed to shut down", t);
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify that the edit-log transaction id advances as expected across an
 * edit, a saveNamespace, a close, and a reload from disk.
 * NOTE(review): the expected txid values (1, 2, 4, 5, 6) encode how many
 * txids each operation consumes (presumably segment begin/end markers and
 * the edit itself) — confirm against FSEditLog if these constants drift.
 */
@Test(timeout=30000) public void testTxIdPersistence() throws Exception {
Configuration conf=getConf();
NameNode.initMetrics(conf,NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
FSNamesystem fsn=FSNamesystem.loadFromDisk(conf);
try {
// A freshly formatted namesystem starts its log at txid 1.
assertEquals(1,fsn.getEditLog().getLastWrittenTxId());
doAnEdit(fsn,1);
// A single edit consumes one txid.
assertEquals(2,fsn.getEditLog().getLastWrittenTxId());
fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
fsn.saveNamespace();
// Saving the namespace advances the txid by two (log roll).
assertEquals(4,fsn.getEditLog().getLastWrittenTxId());
fsn.getFSImage().close();
fsn.close();
// Closing the namesystem writes one more txid.
assertEquals(5,fsn.getEditLog().getLastWrittenTxId());
fsn=null;
fsn=FSNamesystem.loadFromDisk(conf);
// Reloading from disk consumes one further txid.
assertEquals(6,fsn.getEditLog().getLastWrittenTxId());
}
finally {
if (fsn != null) {
fsn.close();
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify that the SecondaryNameNode MXBean exposes the same checkpoint
 * and checkpoint-editlog directories as the SecondaryNameNode itself.
 */
@Test public void testSecondaryWebUi() throws IOException, MalformedObjectNameException, AttributeNotFoundException, MBeanException, ReflectionException, InstanceNotFoundException {
  MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
  ObjectName mxbeanName = new ObjectName("Hadoop:service=SecondaryNameNode,name=SecondaryNameNodeInfo");
  String[] checkpointDir = (String[]) mbs.getAttribute(mxbeanName, "CheckpointDirectories");
  // Fix: JUnit's assertArrayEquals takes the expected array first. The
  // SNN's own view is the expectation; the MBean attribute is the actual.
  Assert.assertArrayEquals(snn.getCheckpointDirectories(), checkpointDir);
  String[] checkpointEditlogDir = (String[]) mbs.getAttribute(mxbeanName, "CheckpointEditlogDirectories");
  Assert.assertArrayEquals(snn.getCheckpointEditlogDirectories(), checkpointEditlogDir);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verify Kerberos login and permission enforcement: user1 logs in from a
 * keytab, must be denied write access to "/", must be allowed to write
 * under the world-writable "/tmp", and the UGI must report KERBEROS auth.
 */
@Test public void testName() throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  try {
    String keyTabDir = System.getProperty("kdc.resource.dir") + "/keytabs";
    String nn1KeytabPath = keyTabDir + "/nn1.keytab";
    String user1KeyTabPath = keyTabDir + "/user1.keytab";
    Configuration conf = new HdfsConfiguration();
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, "nn1/localhost@EXAMPLE.COM");
    conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, nn1KeytabPath);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
    final MiniDFSCluster clusterRef = cluster;
    cluster.waitActive();
    FileSystem fsForCurrentUser = cluster.getFileSystem();
    fsForCurrentUser.mkdirs(new Path("/tmp"));
    // Fix: write the permission as octal 0777 (== 511 decimal) to make
    // the rwxrwxrwx intent obvious.
    fsForCurrentUser.setPermission(new Path("/tmp"), new FsPermission((short) 0777));
    UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI("user1@EXAMPLE.COM", user1KeyTabPath);
    // Fix: parameterize the action instead of using the raw type.
    FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override public FileSystem run() throws Exception {
        return clusterRef.getFileSystem();
      }
    });
    try {
      Path p = new Path("/users");
      fs.mkdirs(p);
      fail("user1 must not be allowed to write in /");
    } catch (IOException expected) {
      // Permission denied is the expected outcome.
    }
    Path p = new Path("/tmp/alpha");
    fs.mkdirs(p);
    assertNotNull(fs.listStatus(p));
    assertEquals(AuthenticationMethod.KERBEROS, ugi.getAuthenticationMethod());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verify a secure NameNode setup driven by system properties: the test
 * user logs in from a keytab, must be denied write access to "/", must be
 * allowed to write under the world-writable "/tmp", and the UGI must
 * report KERBEROS authentication.
 */
@Test public void testSecureNameNode() throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  try {
    String nnPrincipal = System.getProperty("dfs.namenode.kerberos.principal");
    String nnSpnegoPrincipal = System.getProperty("dfs.namenode.kerberos.internal.spnego.principal");
    String nnKeyTab = System.getProperty("dfs.namenode.keytab.file");
    assertNotNull("NameNode principal was not specified", nnPrincipal);
    assertNotNull("NameNode SPNEGO principal was not specified", nnSpnegoPrincipal);
    assertNotNull("NameNode keytab was not specified", nnKeyTab);
    Configuration conf = new HdfsConfiguration();
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, nnPrincipal);
    conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY, nnSpnegoPrincipal);
    conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, nnKeyTab);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
    final MiniDFSCluster clusterRef = cluster;
    cluster.waitActive();
    FileSystem fsForCurrentUser = cluster.getFileSystem();
    fsForCurrentUser.mkdirs(new Path("/tmp"));
    // Fix: write the permission as octal 0777 (== 511 decimal) to make
    // the rwxrwxrwx intent obvious.
    fsForCurrentUser.setPermission(new Path("/tmp"), new FsPermission((short) 0777));
    String userPrincipal = System.getProperty("user.principal");
    String userKeyTab = System.getProperty("user.keytab");
    assertNotNull("User principal was not specified", userPrincipal);
    assertNotNull("User keytab was not specified", userKeyTab);
    UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(userPrincipal, userKeyTab);
    // Fix: parameterize the action instead of using the raw type.
    FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override public FileSystem run() throws Exception {
        return clusterRef.getFileSystem();
      }
    });
    try {
      Path p = new Path("/users");
      fs.mkdirs(p);
      fail("User must not be allowed to write in /");
    } catch (IOException expected) {
      // Permission denied is the expected outcome.
    }
    Path p = new Path("/tmp/alpha");
    fs.mkdirs(p);
    assertNotNull(fs.listStatus(p));
    assertEquals(AuthenticationMethod.KERBEROS, ugi.getAuthenticationMethod());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests transaction logging in dfs: runs NUM_THREADS concurrent
 * transaction threads against the namesystem, then replays every
 * finalized edits file and checks the transaction count matches.
 */
@Test public void testEditLog() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
FileSystem fileSys=null;
try {
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,true);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
// Print the name directories for diagnostic purposes only.
for (Iterator it=cluster.getNameDirs(0).iterator(); it.hasNext(); ) {
File dir=new File(it.next().getPath());
System.out.println(dir);
}
FSImage fsimage=namesystem.getFSImage();
FSEditLog editLog=fsimage.getEditLog();
// Small output buffer forces frequent flushes while the threads write.
editLog.setOutputBufferCapacity(2048);
Thread threadId[]=new Thread[NUM_THREADS];
for (int i=0; i < NUM_THREADS; i++) {
Transactions trans=new Transactions(namesystem,NUM_TRANSACTIONS);
threadId[i]=new Thread(trans,"TransactionThread-" + i);
threadId[i].start();
}
// Wait for all writers; retry the same thread if the join is interrupted.
for (int i=0; i < NUM_THREADS; i++) {
try {
threadId[i].join();
}
catch ( InterruptedException e) {
i--;
}
}
editLog.close();
namesystem.getDelegationTokenSecretManager().stopThreads();
int numKeys=namesystem.getDelegationTokenSecretManager().getNumberOfKeys();
// Total = all thread transactions + secret-manager key txns + 2
// (presumably the segment begin/end records — confirm against FSEditLog).
int expectedTransactions=NUM_THREADS * opsPerTrans * NUM_TRANSACTIONS + numKeys + 2;
// Replay every finalized edits file and verify the transaction count.
for ( StorageDirectory sd : fsimage.getStorage().dirIterable(NameNodeDirType.EDITS)) {
File editFile=NNStorage.getFinalizedEditsFile(sd,1,1 + expectedTransactions - 1);
System.out.println("Verifying file: " + editFile);
FSEditLogLoader loader=new FSEditLogLoader(namesystem,0);
long numEdits=loader.loadFSEdits(new EditLogFileInputStream(editFile),1);
assertEquals("Verification for " + editFile,expectedTransactions,numEdits);
}
}
finally {
if (fileSys != null) fileSys.close();
if (cluster != null) cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier ConditionMatcher
/**
 * Test that collisions in the block ID space are handled gracefully:
 * after the sequential block-ID generator is rewound into a range that
 * is already in use, newly allocated IDs must skip past the used ones.
 * @throws IOException
 */
@Test public void testTriggerBlockIdCollision() throws IOException {
Configuration conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
FSNamesystem fsn=cluster.getNamesystem();
final int blockCount=10;
// Create a file that allocates blockCount block IDs.
Path path1=new Path("testBlockIdCollisionDetection_file1.dat");
DFSTestUtil.createFile(fs,path1,IO_SIZE,BLOCK_SIZE * blockCount,BLOCK_SIZE,REPLICATION,SEED);
List blocks1=DFSTestUtil.getAllBlocks(fs,path1);
// Rewind the generator by 5, so naive allocation would collide with
// IDs already handed out to file1.
SequentialBlockIdGenerator blockIdGenerator=fsn.getBlockIdGenerator();
blockIdGenerator.setCurrentValue(blockIdGenerator.getCurrentValue() - 5);
// Create a second file; its allocations must avoid the collisions.
Path path2=new Path("testBlockIdCollisionDetection_file2.dat");
DFSTestUtil.createFile(fs,path2,IO_SIZE,BLOCK_SIZE * blockCount,BLOCK_SIZE,REPLICATION,SEED);
List blocks2=DFSTestUtil.getAllBlocks(fs,path2);
assertThat(blocks2.size(),is(blockCount));
// file2's first block ID continues right after file1's last (10th) one,
// i.e. the generator skipped over the in-use range.
assertThat(blocks2.get(0).getBlock().getBlockId(),is(blocks1.get(9).getBlock().getBlockId() + 1));
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier ConditionMatcher
/**
 * Test that block IDs are generated sequentially: every block of a
 * freshly written multi-block file carries the predecessor's ID plus one.
 * @throws IOException
 */
@Test public void testBlockIdGeneration() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    // Write a 10-block file so several consecutive IDs get allocated.
    Path path = new Path("testBlockIdGeneration.dat");
    DFSTestUtil.createFile(fs, path, IO_SIZE, BLOCK_SIZE * 10, BLOCK_SIZE, REPLICATION, SEED);
    List blocks = DFSTestUtil.getAllBlocks(fs, path);
    // Walk the block list, checking each ID against its predecessor.
    long previousId = blocks.get(0).getBlock().getBlockId();
    LOG.info("Block0 id is " + previousId);
    for (int idx = 1; idx < blocks.size(); idx++) {
      long currentId = blocks.get(idx).getBlock().getBlockId();
      LOG.info("Block" + idx + " id is "+ currentId);
      assertThat(currentId, is(previousId + 1));
      previousId = currentId;
    }
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for snapshot file while modifying file after snapshot: the snapshot
 * view must keep the pre-modification state while the current view
 * reflects the change.
 */
@Test(timeout=15000) public void testSnapshotPathINodesAfterModification() throws Exception {
// Resolve the current (non-snapshot) path of file1.
String[] names=INode.getPathNames(file1.toString());
byte[][] components=INode.getPathComponents(names);
INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] inodes=nodesInPath.getINodes();
assertEquals(inodes.length,components.length);
assertEquals(inodes[components.length - 1].getFullPathName(),file1.toString());
// Remember the modification time before the append.
final long modTime=inodes[inodes.length - 1].getModificationTime();
// Take snapshot s3, then modify file1 in the current tree.
hdfs.allowSnapshot(sub1);
hdfs.createSnapshot(sub1,"s3");
DFSTestUtil.appendFile(hdfs,file1,"the content for appending");
// Resolve the snapshot copy of file1 under .snapshot/s3.
String snapshotPath=sub1.toString() + "/.snapshot/s3/file1";
names=INode.getPathNames(snapshotPath);
components=INode.getPathComponents(names);
INodesInPath ssNodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] ssInodes=ssNodesInPath.getINodes();
// One fewer INode than components (the ".snapshot" component).
assertEquals(ssInodes.length,components.length - 1);
final Snapshot s3=getSnapshot(ssNodesInPath,"s3");
assertSnapshot(ssNodesInPath,true,s3,3);
// The snapshot view must still report the pre-append modification time.
INode snapshotFileNode=ssInodes[ssInodes.length - 1];
assertEquals(snapshotFileNode.getLocalName(),file1.getName());
assertTrue(snapshotFileNode.asFile().isWithSnapshot());
assertEquals(modTime,snapshotFileNode.getModificationTime(ssNodesInPath.getPathSnapshotId()));
// Re-resolve the current path; it must reflect the append.
names=INode.getPathNames(file1.toString());
components=INode.getPathComponents(names);
INodesInPath newNodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
assertSnapshot(newNodesInPath,false,s3,-1);
INode[] newInodes=newNodesInPath.getINodes();
assertEquals(newInodes.length,components.length);
final int last=components.length - 1;
assertEquals(newInodes[last].getFullPathName(),file1.toString());
// The append must have changed the modification time in the current tree.
Assert.assertFalse(modTime == newInodes[last].getModificationTime());
// Clean up snapshot state for subsequent tests.
hdfs.deleteSnapshot(sub1,"s3");
hdfs.disallowSnapshot(sub1);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for a normal (non-snapshot) file: resolving the full path and resolving
 * truncated suffixes of it must yield the expected INodes and never
 * indicate a snapshot.
 */
@Test(timeout=15000) public void testNonSnapshotPathINodes() throws Exception {
  String[] names = INode.getPathNames(file1.toString());
  byte[][] components = INode.getPathComponents(names);
  INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
  INode[] inodes = nodesInPath.getINodes();
  // Fix: JUnit's assertEquals takes the expected value first; the
  // original passed (actual, expected), producing misleading failure
  // messages. Same pass/fail behavior, clearer diagnostics.
  assertEquals(components.length, inodes.length);
  assertSnapshot(nodesInPath, false, null, -1);
  assertTrue("file1=" + file1 + ", nodesInPath=" + nodesInPath, inodes[components.length - 1] != null);
  assertEquals(file1.toString(), inodes[components.length - 1].getFullPathName());
  assertEquals(sub1.toString(), inodes[components.length - 2].getFullPathName());
  assertEquals(dir.toString(), inodes[components.length - 3].getFullPathName());
  // Resolve only the final component of the path.
  nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 1, false);
  inodes = nodesInPath.getINodes();
  assertEquals(1, inodes.length);
  assertSnapshot(nodesInPath, false, null, -1);
  assertEquals(file1.toString(), inodes[0].getFullPathName());
  // Resolve the last two components of the path.
  nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 2, false);
  inodes = nodesInPath.getINodes();
  assertEquals(2, inodes.length);
  assertSnapshot(nodesInPath, false, null, -1);
  assertEquals(file1.toString(), inodes[1].getFullPathName());
  assertEquals(sub1.toString(), inodes[0].getFullPathName());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for snapshot file while adding a new file after snapshot: the new file
 * must be visible in the current tree but resolve to null through the
 * snapshot taken before its creation.
 */
@Test(timeout=15000) public void testSnapshotPathINodesWithAddedFile() throws Exception {
// Snapshot sub1 first, then create file3 — so file3 exists only in the
// current tree, not in snapshot s4.
hdfs.allowSnapshot(sub1);
hdfs.createSnapshot(sub1,"s4");
final Path file3=new Path(sub1,"file3");
DFSTestUtil.createFile(hdfs,file3,1024,REPLICATION,seed);
{
// Resolving file3 through .snapshot/s4 must reach the snapshot but
// yield a null INode for file3 (it did not exist at snapshot time).
String snapshotPath=sub1.toString() + "/.snapshot/s4/file3";
String[] names=INode.getPathNames(snapshotPath);
byte[][] components=INode.getPathComponents(names);
INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] inodes=nodesInPath.getINodes();
// One fewer INode than components (the ".snapshot" component), and
// one more entry than non-null INodes (the missing file3).
assertEquals(inodes.length,components.length - 1);
assertEquals(nodesInPath.getNumNonNull(),components.length - 2);
s4=getSnapshot(nodesInPath,"s4");
assertSnapshot(nodesInPath,true,s4,3);
// The last entry — file3 as seen by the snapshot — must be null.
assertNull(inodes[inodes.length - 1]);
}
// Resolving file3 through the current tree must succeed normally.
String[] names=INode.getPathNames(file3.toString());
byte[][] components=INode.getPathComponents(names);
INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] inodes=nodesInPath.getINodes();
assertEquals(inodes.length,components.length);
assertSnapshot(nodesInPath,false,s4,-1);
assertEquals(inodes[components.length - 1].getFullPathName(),file3.toString());
assertEquals(inodes[components.length - 2].getFullPathName(),sub1.toString());
assertEquals(inodes[components.length - 3].getFullPathName(),dir.toString());
// Clean up snapshot state for subsequent tests.
hdfs.deleteSnapshot(sub1,"s4");
hdfs.disallowSnapshot(sub1);
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for snapshot file: resolving a file through ".snapshot/&lt;name&gt;",
 * resolving the bare ".snapshot" directory, and rejecting ".snapshot"
 * components on paths that are not snapshottable.
 */
@Test(timeout=15000) public void testSnapshotPathINodes() throws Exception {
hdfs.allowSnapshot(sub1);
hdfs.createSnapshot(sub1,"s1");
// Resolve file1 through the snapshot path .snapshot/s1.
String snapshotPath=sub1.toString() + "/.snapshot/s1/file1";
String[] names=INode.getPathNames(snapshotPath);
byte[][] components=INode.getPathComponents(names);
INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] inodes=nodesInPath.getINodes();
// One fewer INode than components (the ".snapshot" component).
assertEquals(inodes.length,components.length - 1);
final Snapshot snapshot=getSnapshot(nodesInPath,"s1");
assertSnapshot(nodesInPath,true,snapshot,3);
// The resolved leaf must be file1's INode, and its parent must carry
// snapshot information.
INode snapshotFileNode=inodes[inodes.length - 1];
assertINodeFile(snapshotFileNode,file1);
assertTrue(snapshotFileNode.getParent().isWithSnapshot());
// Resolve only the last component of the snapshot path.
nodesInPath=INodesInPath.resolve(fsdir.rootDir,components,1,false);
inodes=nodesInPath.getINodes();
assertEquals(inodes.length,1);
assertSnapshot(nodesInPath,true,snapshot,-1);
assertINodeFile(nodesInPath.getLastINode(),file1);
// Resolve the last two components of the snapshot path.
nodesInPath=INodesInPath.resolve(fsdir.rootDir,components,2,false);
inodes=nodesInPath.getINodes();
assertEquals(inodes.length,2);
assertSnapshot(nodesInPath,true,snapshot,0);
assertINodeFile(nodesInPath.getLastINode(),file1);
// Resolve the bare ".snapshot" directory: the last INode is the
// snapshottable directory itself, not a file.
String dotSnapshotPath=sub1.toString() + "/.snapshot";
names=INode.getPathNames(dotSnapshotPath);
components=INode.getPathComponents(names);
nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
inodes=nodesInPath.getINodes();
assertEquals(inodes.length,components.length - 1);
assertSnapshot(nodesInPath,true,snapshot,-1);
final INode last=nodesInPath.getLastINode();
assertEquals(last.getFullPathName(),sub1.toString());
assertFalse(last instanceof INodeFile);
// A ".snapshot" component under a non-existent / non-snapshottable
// path must fail with FileNotFoundException at every prefix.
String[] invalidPathComponent={"invalidDir","foo",".snapshot","bar"};
Path invalidPath=new Path(invalidPathComponent[0]);
for (int i=1; i < invalidPathComponent.length; i++) {
invalidPath=new Path(invalidPath,invalidPathComponent[i]);
try {
hdfs.getFileStatus(invalidPath);
Assert.fail();
}
catch ( FileNotFoundException fnfe) {
System.out.println("The exception is expected: " + fnfe);
}
}
// Clean up snapshot state for subsequent tests.
hdfs.deleteSnapshot(sub1,"s1");
hdfs.disallowSnapshot(sub1);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for snapshot file after deleting the original file: the snapshot copy
 * must remain resolvable while the current path resolves to null.
 */
@Test(timeout=15000) public void testSnapshotPathINodesAfterDeletion() throws Exception {
// Snapshot sub1, then delete file1 from the current tree.
hdfs.allowSnapshot(sub1);
hdfs.createSnapshot(sub1,"s2");
hdfs.delete(file1,false);
final Snapshot snapshot;
{
// The snapshot copy of file1 must still resolve.
String snapshotPath=sub1.toString() + "/.snapshot/s2/file1";
String[] names=INode.getPathNames(snapshotPath);
byte[][] components=INode.getPathComponents(names);
INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] inodes=nodesInPath.getINodes();
// One fewer INode than components (the ".snapshot" component).
assertEquals(inodes.length,components.length - 1);
snapshot=getSnapshot(nodesInPath,"s2");
assertSnapshot(nodesInPath,true,snapshot,3);
// The leaf is still file1's INode, preserved via the snapshot.
final INode inode=inodes[inodes.length - 1];
assertEquals(file1.getName(),inode.getLocalName());
assertTrue(inode.asFile().isWithSnapshot());
}
// Resolving file1 through the current tree must now yield a null leaf.
String[] names=INode.getPathNames(file1.toString());
byte[][] components=INode.getPathComponents(names);
INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] inodes=nodesInPath.getINodes();
assertEquals(inodes.length,components.length);
// Everything up to the deleted file still resolves.
assertEquals(nodesInPath.getNumNonNull(),components.length - 1);
assertSnapshot(nodesInPath,false,snapshot,-1);
assertNull(inodes[components.length - 1]);
assertEquals(inodes[components.length - 2].getFullPathName(),sub1.toString());
assertEquals(inodes[components.length - 3].getFullPathName(),dir.toString());
// Clean up snapshot state for subsequent tests.
hdfs.deleteSnapshot(sub1,"s2");
hdfs.disallowSnapshot(sub1);
}
APIUtilityVerifier EqualityVerifier
/**
 * Verify the following scenario.
 * 1. NN restarts.
 * 2. Heartbeat RPC will retry and succeed. NN asks DN to reregister.
 * 3. After reregistration completes, DN will send Heartbeat, followed by
 *    Blockreport.
 * 4. NN will mark DatanodeStorageInfo#blockContentsStale to false.
 * @throws Exception
 */
@Test(timeout=60000) public void testStorageBlockContentsStaleAfterNNRestart() throws Exception {
  MiniDFSCluster dfsCluster = null;
  try {
    Configuration config = new Configuration();
    dfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    dfsCluster.waitActive();
    dfsCluster.restartNameNode(true);
    BlockManagerTestUtil.checkHeartbeat(dfsCluster.getNamesystem().getBlockManager());
    // Query the NN's FSNamesystemState MXBean for the stale-storage count.
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanNameFsns = new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
    Integer numStaleStorages = (Integer) (mbs.getAttribute(mxbeanNameFsns, "NumStaleStorages"));
    // After the post-reregistration block report, no storage is stale.
    assertEquals(0, numStaleStorages.intValue());
  } finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }
  // Fix: removed the redundant trailing "return;" that ended this void
  // method.
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * 1. create DFS cluster with 3 storage directories
 *    - 2 EDITS_IMAGE(name1, name2), 1 EDITS(name3)
 * 2. create a file
 * 3. corrupt/disable name2 and name3 by removing rwx permission
 * 4. run doCheckpoint
 *    - will fail on removed dirs (which invalidates them)
 * 5. write another file
 * 6. check there is only one healthy storage dir
 * 7. run doCheckpoint - recover should fail but checkpoint should succeed
 * 8. check there is still only one healthy storage dir
 * 9. restore the access permission for name2 and name 3, run checkpoint again
 * 10.verify there are 3 healthy storage dirs.
 */
@Test public void testStorageRestoreFailure() throws Exception {
  SecondaryNameNode secondary = null;
  // On Windows the permission change must target the "current" subdir.
  String nameDir2 = Shell.WINDOWS ? (new File(path2, "current").getAbsolutePath()) : path2.toString();
  String nameDir3 = Shell.WINDOWS ? (new File(path3, "current").getAbsolutePath()) : path3.toString();
  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).manageNameDfsDirs(false).build();
    cluster.waitActive();
    secondary = new SecondaryNameNode(config);
    printStorages(cluster.getNameNode().getFSImage());
    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/", "test");
    assertTrue(fs.mkdirs(path));
    // Disable name2 and name3 by removing all permissions.
    assertEquals(0, FileUtil.chmod(nameDir2, "000"));
    assertEquals(0, FileUtil.chmod(nameDir3, "000"));
    secondary.doCheckpoint();
    printStorages(cluster.getNameNode().getFSImage());
    path = new Path("/", "test1");
    assertTrue(fs.mkdirs(path));
    // Fix: these checks used the Java "assert" keyword, which is a no-op
    // unless the JVM runs with -ea. Use JUnit assertions so they always
    // execute and report the observed value on failure.
    assertEquals(1, cluster.getNameNode().getFSImage().getStorage().getNumStorageDirs());
    secondary.doCheckpoint();
    assertEquals(1, cluster.getNameNode().getFSImage().getStorage().getNumStorageDirs());
    // Restore permissions; the next checkpoint should re-add both dirs.
    assertEquals(0, FileUtil.chmod(nameDir2, "755"));
    assertEquals(0, FileUtil.chmod(nameDir3, "755"));
    secondary.doCheckpoint();
    assertEquals(3, cluster.getNameNode().getFSImage().getStorage().getNumStorageDirs());
  } finally {
    // Always restore permissions so cleanup can delete the directories.
    if (path2.exists()) {
      FileUtil.chmod(nameDir2, "755");
    }
    if (path3.exists()) {
      FileUtil.chmod(nameDir3, "755");
    }
    if (cluster != null) {
      cluster.shutdown();
    }
    if (secondary != null) {
      secondary.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test for downloading a checkpoint made at a later checkpoint from the
 * active: BootstrapStandby must pick up the most recent image, not the
 * one from format time.
 */
@Test public void testDownloadingLaterCheckpoint() throws Exception {
  // Roll the log twice, then save a namespace checkpoint on the active.
  nn0.getRpcServer().rollEditLog();
  nn0.getRpcServer().rollEditLog();
  NameNodeAdapter.enterSafeMode(nn0, false);
  NameNodeAdapter.saveNamespace(nn0);
  NameNodeAdapter.leaveSafeMode(nn0);
  final long checkpointTxId = NameNodeAdapter.getNamesystem(nn0).getFSImage().getMostRecentCheckpointTxId();
  assertEquals(6, checkpointTxId);
  // Bootstrap the standby and verify it fetched exactly that checkpoint.
  final int exitCode = BootstrapStandby.run(new String[]{"-force"}, cluster.getConfiguration(1));
  assertEquals(0, exitCode);
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of((int) checkpointTxId));
  FSImageTestUtil.assertNNFilesMatch(cluster);
  cluster.restartNameNode(1);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * BootstrapStandby when the existing NN is standby: bootstrapping the
 * second NN from a standby source must still succeed.
 */
@Test public void testBootstrapStandbyWithStandbyNN() throws Exception {
  // Put NN0 into standby, then bootstrap the (stopped) second NN from it.
  cluster.transitionToStandby(0);
  final Configuration targetConf = cluster.getConfiguration(1);
  cluster.shutdownNameNode(1);
  final int exitCode = BootstrapStandby.run(new String[]{"-force"}, targetConf);
  assertEquals(0, exitCode);
  // The bootstrapped NN must hold the initial (txid 0) checkpoint with
  // metadata files matching the source.
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of(0));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * BootstrapStandby when the existing NN is active
 */
@Test public void testBootstrapStandbyWithActiveNN() throws Exception {
  // Put the already-running NN into the active state before bootstrapping.
  cluster.transitionToActive(0);
  final Configuration targetNnConf = cluster.getConfiguration(1);
  cluster.shutdownNameNode(1);
  final int exitCode = BootstrapStandby.run(new String[]{"-force"}, targetNnConf);
  assertEquals(0, exitCode);
  // The bootstrapped NN should hold the initial (txid 0) checkpoint and
  // byte-identical storage files.
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of(0));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Make sure that starting a second NN with the -upgrade flag fails if the
 * other NN has already done that.
 */
@Test public void testCannotUpgradeSecondNameNode() throws IOException, URISyntaxException {
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
// HA pair sharing an NFS edits dir; no datanodes needed for this test.
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
File sharedDir=new File(cluster.getSharedEditsDir(0,1));
// Sanity: no "previous" dirs exist anywhere before the upgrade starts.
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
checkPreviousDirExistence(sharedDir,false);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Upgrade via NN0: shut down NN1, restart NN0 with -upgrade so it
// upgrades its local storage and the shared edits dir.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
// NN0 and the shared dir should now have "previous" dirs; NN1 should not.
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkPreviousDirExistence(sharedDir,true);
assertTrue(fs.mkdirs(new Path("/foo2")));
// Restart NN0 normally; the upgrade is still pending finalization.
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
assertTrue(fs.mkdirs(new Path("/foo3")));
// Starting NN1 with -upgrade must be rejected, since NN0 has already
// begun upgrading the shared log.
cluster.getNameNodeInfos()[1].setStartOpt(StartupOption.UPGRADE);
try {
cluster.restartNameNode(1,false);
fail("Should not have been able to start second NN with -upgrade");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("It looks like the shared log is already being upgraded",ioe);
}
}
finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Ensure that an admin cannot finalize an HA upgrade without at least one NN
 * being active.
 */
@Test public void testCannotFinalizeIfNoActive() throws IOException, URISyntaxException {
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
// HA pair with an NFS shared edits dir and no datanodes.
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
File sharedDir=new File(cluster.getSharedEditsDir(0,1));
// Sanity: no "previous" dirs before the upgrade.
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
checkPreviousDirExistence(sharedDir,false);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Upgrade via NN0: shut down NN1, restart NN0 with -upgrade.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkPreviousDirExistence(sharedDir,true);
assertTrue(fs.mkdirs(new Path("/foo2")));
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
assertTrue(fs.mkdirs(new Path("/foo3")));
// Bring NN1 back via bootstrapStandby and fail over to it.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
assertTrue(fs.mkdirs(new Path("/foo4")));
assertCTimesEqual(cluster);
// With both NNs now in standby, finalizing must be refused.
cluster.transitionToStandby(1);
try {
runFinalizeCommand(cluster);
fail("Should not have been able to finalize upgrade with no NN active");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Cannot finalize with no NameNode active",ioe);
}
}
finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Make sure that an HA NN with NFS-based HA can successfully start and
 * upgrade.
 */
@Test public void testNfsUpgrade() throws IOException, URISyntaxException {
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
// HA pair with an NFS shared edits dir and no datanodes.
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
File sharedDir=new File(cluster.getSharedEditsDir(0,1));
// Sanity: no "previous" dirs before the upgrade.
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
checkPreviousDirExistence(sharedDir,false);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Upgrade via NN0: shut down NN1, restart NN0 with -upgrade.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
// NN0 and the shared dir should now have "previous" dirs; NN1 should not.
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkPreviousDirExistence(sharedDir,true);
assertTrue(fs.mkdirs(new Path("/foo2")));
// Restart NN0 normally, then verify the cluster keeps working and the
// standby can be re-bootstrapped and made active after the upgrade.
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
assertTrue(fs.mkdirs(new Path("/foo3")));
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
assertTrue(fs.mkdirs(new Path("/foo4")));
assertCTimesEqual(cluster);
}
finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Upgrade an HA cluster backed by JournalNodes and then finalize the
 * upgrade, verifying the "previous" dirs are removed everywhere.
 */
@Test public void testFinalizeWithJournalNodes() throws IOException, URISyntaxException {
MiniQJMHACluster qjCluster=null;
FileSystem fs=null;
try {
// QJM-backed HA pair with no datanodes.
Builder builder=new MiniQJMHACluster.Builder(conf);
builder.getDfsBuilder().numDataNodes(0);
qjCluster=builder.build();
MiniDFSCluster cluster=qjCluster.getDfsCluster();
// Sanity: no "previous" dirs on the JNs or NNs before the upgrade.
checkJnPreviousDirExistence(qjCluster,false);
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Upgrade via NN0: shut down NN1, restart NN0 with -upgrade.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
assertTrue(fs.mkdirs(new Path("/foo2")));
// NN0 and the JNs should now have "previous" dirs; NN1 should not.
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkJnPreviousDirExistence(qjCluster,true);
// Re-bootstrap NN1, then finalize; all "previous" dirs must be gone.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
runFinalizeCommand(cluster);
checkClusterPreviousDirExistence(cluster,false);
checkJnPreviousDirExistence(qjCluster,false);
assertCTimesEqual(cluster);
}
finally {
if (fs != null) {
fs.close();
}
if (qjCluster != null) {
qjCluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Make sure that an HA NN can successfully upgrade when configured using
 * JournalNodes.
 */
@Test public void testUpgradeWithJournalNodes() throws IOException, URISyntaxException {
MiniQJMHACluster qjCluster=null;
FileSystem fs=null;
try {
// QJM-backed HA pair with no datanodes.
Builder builder=new MiniQJMHACluster.Builder(conf);
builder.getDfsBuilder().numDataNodes(0);
qjCluster=builder.build();
MiniDFSCluster cluster=qjCluster.getDfsCluster();
// Sanity: no "previous" dirs on the JNs or NNs before the upgrade.
checkJnPreviousDirExistence(qjCluster,false);
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Upgrade via NN0: shut down NN1, restart NN0 with -upgrade.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
// NN0 and the JNs should now have "previous" dirs; NN1 should not.
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkJnPreviousDirExistence(qjCluster,true);
assertTrue(fs.mkdirs(new Path("/foo2")));
// Restart NN0 normally, re-bootstrap NN1, and make sure failover still
// works with the upgrade pending finalization.
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
assertTrue(fs.mkdirs(new Path("/foo3")));
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
assertTrue(fs.mkdirs(new Path("/foo4")));
assertCTimesEqual(cluster);
}
finally {
if (fs != null) {
fs.close();
}
if (qjCluster != null) {
qjCluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test rollback with NFS shared dir.
 */
@Test public void testRollbackWithNfs() throws Exception {
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
// HA pair with an NFS shared edits dir and no datanodes.
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
File sharedDir=new File(cluster.getSharedEditsDir(0,1));
// Sanity: no "previous" dirs before the upgrade.
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
checkPreviousDirExistence(sharedDir,false);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Upgrade via NN0: shut down NN1, restart NN0 with -upgrade.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkPreviousDirExistence(sharedDir,true);
assertTrue(fs.mkdirs(new Path("/foo2")));
// Re-bootstrap NN1 while the upgrade is pending; its local dirs should
// still have no "previous" dir.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkPreviousDirExistence(sharedDir,true);
assertCTimesEqual(cluster);
// Shut everything down and roll back offline using NN0's name dirs.
Collection nn1NameDirs=cluster.getNameDirs(0);
cluster.shutdown();
conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,Joiner.on(",").join(nn1NameDirs));
NameNode.doRollback(conf,false);
// Rollback should have removed the "previous" dirs for NN0 and the
// shared dir.
// NOTE(review): these checks run after cluster.shutdown(); they appear
// to rely on the MiniDFSCluster object still resolving its storage
// dirs from configuration -- confirm.
checkNnPreviousDirExistence(cluster,0,false);
checkPreviousDirExistence(sharedDir,false);
}
finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test rollback of an HA upgrade when the cluster is backed by
 * JournalNodes.
 */
@Test public void testRollbackWithJournalNodes() throws IOException, URISyntaxException {
MiniQJMHACluster qjCluster=null;
FileSystem fs=null;
try {
// QJM-backed HA pair with no datanodes.
Builder builder=new MiniQJMHACluster.Builder(conf);
builder.getDfsBuilder().numDataNodes(0);
qjCluster=builder.build();
MiniDFSCluster cluster=qjCluster.getDfsCluster();
// Sanity: no "previous" dirs before the upgrade.
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
checkJnPreviousDirExistence(qjCluster,false);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Upgrade via NN0: shut down NN1, restart NN0 with -upgrade.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkJnPreviousDirExistence(qjCluster,true);
assertTrue(fs.mkdirs(new Path("/foo2")));
// Re-bootstrap NN1 while the upgrade is pending.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkJnPreviousDirExistence(qjCluster,true);
assertCTimesEqual(cluster);
// Shut everything down and roll back offline using NN0's name dirs.
Collection nn1NameDirs=cluster.getNameDirs(0);
cluster.shutdown();
conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,Joiner.on(",").join(nn1NameDirs));
NameNode.doRollback(conf,false);
// Rollback should have removed the "previous" dirs for NN0 and the JNs.
// NOTE(review): these checks run after cluster.shutdown(); they appear
// to rely on the cluster objects still resolving their storage dirs --
// confirm.
checkNnPreviousDirExistence(cluster,0,false);
checkJnPreviousDirExistence(qjCluster,false);
}
finally {
if (fs != null) {
fs.close();
}
if (qjCluster != null) {
qjCluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Make sure that even if the NN which initiated the upgrade is in the standby
 * state that we're allowed to finalize.
 */
@Test public void testFinalizeFromSecondNameNodeWithJournalNodes() throws IOException, URISyntaxException {
MiniQJMHACluster qjCluster=null;
FileSystem fs=null;
try {
// QJM-backed HA pair with no datanodes.
Builder builder=new MiniQJMHACluster.Builder(conf);
builder.getDfsBuilder().numDataNodes(0);
qjCluster=builder.build();
MiniDFSCluster cluster=qjCluster.getDfsCluster();
// Sanity: no "previous" dirs before the upgrade.
checkJnPreviousDirExistence(qjCluster,false);
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Upgrade via NN0: shut down NN1, restart NN0 with -upgrade.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkJnPreviousDirExistence(qjCluster,true);
// Re-bootstrap NN1, then fail over so the upgrade-initiating NN0 is
// standby when finalize runs.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
// Finalizing from NN1 must succeed and clear all "previous" dirs.
runFinalizeCommand(cluster);
checkClusterPreviousDirExistence(cluster,false);
checkJnPreviousDirExistence(qjCluster,false);
assertCTimesEqual(cluster);
}
finally {
if (fs != null) {
fs.close();
}
if (qjCluster != null) {
qjCluster.shutdown();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
* Test if StandbyException can be thrown from StandbyNN, when it's requested for
* password. (HDFS-6475). With StandbyException, the client can failover to try
* activeNN.
*/
@Test public void testDelegationTokenStandbyNNAppearFirst() throws Exception {
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
final DelegationTokenSecretManager stSecretManager=NameNodeAdapter.getDtSecretManager(nn1.getNamesystem());
final Token token=getDelegationToken(fs,"JobTracker");
final DelegationTokenIdentifier identifier=new DelegationTokenIdentifier();
byte[] tokenId=token.getIdentifier();
identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
assertTrue(null != stSecretManager.retrievePassword(identifier));
final UserGroupInformation ugi=UserGroupInformation.createRemoteUser("JobTracker");
ugi.addToken(token);
ugi.doAs(new PrivilegedExceptionAction(){
@Override public Object run(){
try {
try {
byte[] tmppw=dtSecretManager.retrievePassword(identifier);
fail("InvalidToken with cause StandbyException is expected" + " since nn0 is standby");
return tmppw;
}
catch ( IOException e) {
throw new SecurityException("Failed to obtain user group information: " + e,e);
}
}
catch ( Exception oe) {
HttpServletResponse response=mock(HttpServletResponse.class);
ExceptionHandler eh=new ExceptionHandler();
eh.initResponse(response);
Response resp=eh.toResponse(oe);
Map,?> m=(Map,?>)JSON.parse(resp.getEntity().toString());
RemoteException re=JsonUtil.toRemoteException(m);
Exception unwrapped=((RemoteException)re).unwrapRemoteException(StandbyException.class);
assertTrue(unwrapped instanceof StandbyException);
return null;
}
}
}
);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * HDFS-3062: DistributedFileSystem.getCanonicalServiceName() throws an
 * exception if the URI is a logical URI. This bug fails the combination of
 * ha + mapred + security.
 */
@Test public void testDFSGetCanonicalServiceName() throws Exception {
  final URI logicalUri = HATestUtil.getLogicalUri(cluster);
  final String haService = HAUtil.buildTokenServiceForLogicalUri(
      logicalUri, HdfsConstants.HDFS_URI_SCHEME).toString();
  // The canonical service name of a logical URI must be the HA token service.
  assertEquals(haService, dfs.getCanonicalServiceName());
  final String renewer = UserGroupInformation.getCurrentUser().getShortUserName();
  final Token token = getDelegationToken(dfs, renewer);
  assertEquals(haService, token.getService().toString());
  // Renewal and cancellation should both work against the logical service.
  token.renew(dfs.getConf());
  token.cancel(dfs.getConf());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify that an AbstractFileSystem created over a logical HA URI reports the
 * HA token service name, and that a delegation token obtained through it can
 * be renewed and cancelled.
 */
@Test public void testHdfsGetCanonicalServiceName() throws Exception {
  Configuration conf = dfs.getConf();
  URI haUri = HATestUtil.getLogicalUri(cluster);
  AbstractFileSystem afs = AbstractFileSystem.createFileSystem(haUri, conf);
  String haService = HAUtil.buildTokenServiceForLogicalUri(
      haUri, HdfsConstants.HDFS_URI_SCHEME).toString();
  assertEquals(haService, afs.getCanonicalServiceName());
  // FIX: the declaration had been mangled ("Token>"), which does not
  // compile; restore a valid wildcard type.
  Token<?> token = afs.getDelegationTokens(
      UserGroupInformation.getCurrentUser().getShortUserName()).get(0);
  assertEquals(haService, token.getService().toString());
  // Renewal and cancellation should both work against the logical service.
  token.renew(conf);
  token.cancel(conf);
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verify that HAUtil.cloneDelegationTokenForLogicalUri creates per-NN copies
 * of a logical-URI token, that the clones carry the same identifier and
 * password, and that re-cloning restores host-based service tokens after
 * switching token services to hostnames.
 */
@Test public void testHAUtilClonesDelegationTokens() throws Exception {
  final Token token = getDelegationToken(fs, "JobTracker");
  UserGroupInformation ugi = UserGroupInformation.createRemoteUser("test");
  URI haUri = new URI("hdfs://my-ha-uri/");
  token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri, HdfsConstants.HDFS_URI_SCHEME));
  ugi.addToken(token);
  // Addresses of both physical NNs behind the logical URI.
  Collection<InetSocketAddress> nnAddrs = new HashSet<InetSocketAddress>();
  nnAddrs.add(new InetSocketAddress("localhost", nn0.getNameNodeAddress().getPort()));
  nnAddrs.add(new InetSocketAddress("localhost", nn1.getNameNodeAddress().getPort()));
  HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs);
  // FIX: the declaration had been mangled ("Collection> tokens"), which
  // does not compile; an unbounded wildcard suffices since only size() and
  // Joiner are used.
  Collection<?> tokens = ugi.getTokens();
  // Original logical token plus one clone per NN address.
  assertEquals(3, tokens.size());
  LOG.info("Tokens:\n" + Joiner.on("\n").join(tokens));
  DelegationTokenSelector dts = new DelegationTokenSelector();
  for (InetSocketAddress addr : nnAddrs) {
    Text ipcDtService = SecurityUtil.buildTokenService(addr);
    Token token2 = dts.selectToken(ipcDtService, ugi.getTokens());
    assertNotNull(token2);
    assertArrayEquals(token.getIdentifier(), token2.getIdentifier());
    assertArrayEquals(token.getPassword(), token2.getPassword());
  }
  // With hostname-based token services the IP-based clones no longer match...
  SecurityUtilTestHelper.setTokenServiceUseIp(false);
  for (InetSocketAddress addr : nnAddrs) {
    Text ipcDtService = SecurityUtil.buildTokenService(addr);
    Token token2 = dts.selectToken(ipcDtService, ugi.getTokens());
    assertNull(token2);
  }
  // ...until the token is cloned again for the new service names.
  HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs);
  for (InetSocketAddress addr : nnAddrs) {
    Text ipcDtService = SecurityUtil.buildTokenService(addr);
    Token token2 = dts.selectToken(ipcDtService, ugi.getTokens());
    assertNotNull(token2);
    assertArrayEquals(token.getIdentifier(), token2.getIdentifier());
    assertArrayEquals(token.getPassword(), token2.getPassword());
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Verify that reads fail when the client presents block tokens that the
 * datanodes consider invalid (here: tokens rewritten with a bogus expiry,
 * whose password therefore no longer matches).
 */
@Test public void ensureInvalidBlockTokensAreRejected() throws IOException, URISyntaxException {
cluster.transitionToActive(0);
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
// Baseline: the file is readable with legitimate block tokens.
DFSTestUtil.writeFile(fs,TEST_PATH,TEST_DATA);
assertEquals(TEST_DATA,DFSTestUtil.readFile(fs,TEST_PATH));
DFSClient dfsClient=DFSClientAdapter.getDFSClient((DistributedFileSystem)fs);
DFSClient spyDfsClient=Mockito.spy(dfsClient);
// Intercept getLocatedBlocks and replace every block token with one whose
// identifier has been tampered with (short expiry), invalidating it.
Mockito.doAnswer(new Answer(){
@Override public LocatedBlocks answer( InvocationOnMock arg0) throws Throwable {
LocatedBlocks locatedBlocks=(LocatedBlocks)arg0.callRealMethod();
for ( LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
Token token=lb.getBlockToken();
BlockTokenIdentifier id=lb.getBlockToken().decodeIdentifier();
id.setExpiryDate(Time.now() + 10);
// Rebuild the token from the modified identifier but the original
// password, so the password check on the DN side fails.
Token newToken=new Token(id.getBytes(),token.getPassword(),token.getKind(),token.getService());
lb.setBlockToken(newToken);
}
return locatedBlocks;
}
}
).when(spyDfsClient).getLocatedBlocks(Mockito.anyString(),Mockito.anyLong(),Mockito.anyLong());
DFSClientAdapter.setDFSClient((DistributedFileSystem)fs,spyDfsClient);
try {
assertEquals(TEST_DATA,DFSTestUtil.readFile(fs,TEST_PATH));
fail("Shouldn't have been able to read a file with invalid block tokens");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Could not obtain block",ioe);
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that marking the shared edits dir as being "required" causes the NN to
 * fail if that dir can't be accessed.
 */
@Test public void testFailureOfSharedDir() throws Exception {
Configuration conf=new Configuration();
// Short resource-check interval so the NN notices the dir failure quickly.
conf.setLong(DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,2000);
MiniDFSCluster cluster=null;
File sharedEditsDir=null;
try {
// checkExitOnShutdown(false) because the NN is expected to call exit.
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).checkExitOnShutdown(false).build();
cluster.waitActive();
cluster.transitionToActive(0);
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/test1")));
// Make the shared edits dir unwritable to simulate its failure.
URI sharedEditsUri=cluster.getSharedEditsDir(0,1);
sharedEditsDir=new File(sharedEditsUri);
assertEquals(0,FileUtil.chmod(sharedEditsDir.getAbsolutePath(),"-w",true));
// Wait at least two resource-check cycles so the failure is observed.
Thread.sleep(conf.getLong(DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT) * 2);
// The standby must not enter safe mode just because a resource failed.
NameNode nn1=cluster.getNameNode(1);
assertTrue(nn1.isStandbyState());
assertFalse("StandBy NameNode should not go to SafeMode on resource unavailability",nn1.isInSafeMode());
NameNode nn0=cluster.getNameNode(0);
try {
// Rolling the edit log needs the required shared dir, so it must abort.
// NOTE(review): the fail message says "deleted" but the dir was made
// read-only above -- message looks inaccurate; confirm intent.
nn0.getRpcServer().rollEditLog();
fail("Succeeded in rolling edit log despite shared dir being deleted");
}
catch ( ExitException ee) {
GenericTestUtils.assertExceptionContains("finalize log segment 1, 3 failed for required journal",ee);
}
// The local (non-shared) edits dirs should still hold only the
// in-progress segment started at txid 1.
for ( URI editsUri : cluster.getNameEditsDirs(0)) {
if (editsUri.equals(sharedEditsUri)) {
continue;
}
File editsDir=new File(editsUri.getPath());
File curDir=new File(editsDir,"current");
GenericTestUtils.assertGlobEquals(curDir,"edits_.*",NNStorage.getInProgressEditsFileName(1));
}
}
finally {
// Restore write permission so later cleanup can delete the dir.
if (sharedEditsDir != null) {
FileUtil.chmod(sharedEditsDir.getAbsolutePath(),"+w",true);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test that the shared edits dir is automatically added to the list of edits
 * dirs that are marked required.
 */
@Test public void testSharedDirIsAutomaticallyMarkedRequired() throws URISyntaxException {
  final URI fooDir = new URI("file:/foo");
  final URI barDir = new URI("file:/bar");
  final Configuration config = new Configuration();
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, Joiner.on(",").join(fooDir, barDir));
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY, fooDir.toString());
  // Only "foo" is explicitly required, so "bar" must not be in the set yet.
  assertFalse(FSNamesystem.getRequiredNamespaceEditsDirs(config).contains(barDir));
  // Designating "bar" as the shared edits dir should implicitly require it.
  config.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, barDir.toString());
  Collection requiredEditsDirs = FSNamesystem.getRequiredNamespaceEditsDirs(config);
  assertTrue(Joiner.on(",").join(requiredEditsDirs) + " does not contain " + barDir,
      requiredEditsDirs.contains(barDir));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Make sure that the shared edits dirs are listed before non-shared dirs
 * when the configuration is parsed. This ensures that the shared journals
 * are synced before the local ones.
 */
@Test public void testSharedDirsComeFirstInEditsList() throws Exception {
  final Configuration config = new Configuration();
  final URI sharedA = new URI("file:///shared-A");
  final URI localA = new URI("file:///local-A");
  final URI localB = new URI("file:///local-B");
  final URI localC = new URI("file:///local-C");
  config.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, sharedA.toString());
  // Local dirs are deliberately listed out of alphabetical order to prove
  // the result preserves configuration order rather than sorting.
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      Joiner.on(",").join(localC, localB, localA));
  List dirs = FSNamesystem.getNamespaceEditsDirs(config);
  assertEquals("Shared dirs should come first, then local dirs, in the order "
      + "they were listed in the configuration.",
      Joiner.on(",").join(sharedA, localC, localB, localA),
      Joiner.on(",").join(dirs));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test the following case:
 * 1. SBN is reading a finalized edits file when NFS disappears halfway
 * through (or some intermittent error happens)
 * 2. SBN performs a checkpoint and uploads it to the NN
 * 3. NN receives a checkpoint that doesn't correspond to the end of any log
 * segment
 * 4. Both NN and SBN should be able to restart at this point.
 * This is a regression test for HDFS-2766.
 */
@Test public void testCheckpointStartingMidEditsFile() throws Exception {
assertTrue(fs.mkdirs(new Path(TEST_DIR1)));
// Let the standby catch up and both NNs checkpoint at txids 0 and 3.
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
HATestUtil.waitForCheckpoint(cluster,1,ImmutableList.of(0,3));
HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(0,3));
// Simulate the edit-log read failing partway through.
causeFailureOnEditLogRead();
assertTrue(fs.mkdirs(new Path(TEST_DIR2)));
assertTrue(fs.mkdirs(new Path(TEST_DIR3)));
try {
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
fail("Standby fully caught up, but should not have been able to");
}
catch ( HATestUtil.CouldNotCatchUpException e) {
}
// Despite the read failure, a mid-segment checkpoint (txid 5) should be
// made and uploaded to both NNs.
HATestUtil.waitForCheckpoint(cluster,1,ImmutableList.of(0,3,5));
HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(0,3,5));
// The NN must be able to restart from that mid-segment checkpoint.
cluster.restartNameNode(0);
HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(0,3,5));
// Verify the restarted NN still has the full namespace.
FileSystem fs0=null;
try {
fs0=FileSystem.get(NameNode.getUri(nn0.getNameNodeAddress()),conf);
assertTrue(fs0.exists(new Path(TEST_DIR1)));
assertTrue(fs0.exists(new Path(TEST_DIR2)));
assertTrue(fs0.exists(new Path(TEST_DIR3)));
}
finally {
if (fs0 != null) fs0.close();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests that the namenode edits dirs and shared edits dirs are gotten with
 * duplicates removed
 */
@Test public void testHAUniqueEditDirs() throws IOException {
  final Configuration config = new Configuration();
  // The shared dir appears both in the regular edits-dir list and as the
  // shared edits dir; the combined list must de-duplicate it.
  config.set(DFS_NAMENODE_EDITS_DIR_KEY, "file://edits/dir, "
      + "file://edits/shared/dir");
  config.set(DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "file://edits/shared/dir");
  Collection editsDirs = FSNamesystem.getNamespaceEditsDirs(config);
  assertEquals(2, editsDirs.size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test for HDFS-2812. Since lease renewals go from the client
 * only to the active NN, the SBN will have out-of-date lease
 * info when it becomes active. We need to make sure we don't
 * accidentally mark the leases as expired when the failover
 * proceeds.
 */
@Test(timeout=120000) public void testLeasesRenewedOnTransition() throws Exception {
Configuration conf=new Configuration();
// Tail edits frequently so the standby sees the lease quickly.
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
FSDataOutputStream stm=null;
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
NameNode nn0=cluster.getNameNode(0);
NameNode nn1=cluster.getNameNode(1);
try {
cluster.waitActive();
cluster.transitionToActive(0);
LOG.info("Starting with NN 0 active");
// Opening the file for write creates a lease on the active NN only.
stm=fs.create(TEST_FILE_PATH);
long nn0t0=NameNodeAdapter.getLeaseRenewalTime(nn0,TEST_FILE_STR);
assertTrue(nn0t0 > 0);
long nn1t0=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR);
assertEquals("Lease should not yet exist on nn1",-1,nn1t0);
// Small sleep so the standby's lease timestamp is measurably later.
Thread.sleep(5);
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
long nn1t1=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR);
assertTrue("Lease should have been created on standby. Time was: " + nn1t1,nn1t1 > nn0t0);
Thread.sleep(5);
LOG.info("Failing over to NN 1");
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
// Becoming active must renew (not expire) the existing lease.
long nn1t2=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR);
assertTrue("Lease should have been renewed by failover process",nn1t2 > nn1t1);
}
finally {
IOUtils.closeStream(stm);
cluster.shutdown();
}
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This test also serves to test{@link HAUtil#getProxiesForAllNameNodesInNameservice(Configuration,String)} and{@link DFSUtil#getRpcAddressesForNameserviceId(Configuration,String,String)}by virtue of the fact that it wouldn't work properly if the proxies
 * returned were not for the correct NNs.
 */
@Test public void testIsAtLeastOneActive() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0)
      .build();
  try {
    final Configuration failoverConf = new HdfsConfiguration();
    HATestUtil.setFailoverConfigurations(cluster, failoverConf);
    List nnProxies = HAUtil.getProxiesForAllNameNodesInNameservice(
        failoverConf, HATestUtil.getLogicalHostname(cluster));
    assertEquals(2, nnProxies.size());
    // Step through every NN state combination, checking the predicate
    // flips exactly when some NN is active.
    assertFalse(HAUtil.isAtLeastOneActive(nnProxies));
    cluster.transitionToActive(0);
    assertTrue(HAUtil.isAtLeastOneActive(nnProxies));
    cluster.transitionToStandby(0);
    assertFalse(HAUtil.isAtLeastOneActive(nnProxies));
    cluster.transitionToActive(1);
    assertTrue(HAUtil.isAtLeastOneActive(nnProxies));
    cluster.transitionToStandby(1);
    assertFalse(HAUtil.isAtLeastOneActive(nnProxies));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify that a pending datanode message queued on the standby (for a block
 * with a changed generation stamp) is discarded once the datanode
 * re-registers with a new storage ID, so failover can proceed cleanly.
 */
@Test public void testChangedStorageId() throws IOException, URISyntaxException, InterruptedException {
HdfsConfiguration conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).nnTopology(MiniDFSNNTopology.simpleHATopology()).build();
try {
cluster.transitionToActive(0);
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
OutputStream out=fs.create(filePath);
out.write("foo bar baz".getBytes());
out.close();
HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),cluster.getNameNode(1));
// Change the block's gen stamp on disk so the DN's report conflicts with
// the standby's view, producing a pending datanode message.
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,filePath);
assertTrue(MiniDFSCluster.changeGenStampOfBlock(0,block,900));
DataNodeProperties dnProps=cluster.stopDataNode(0);
cluster.restartNameNode(1,false);
assertTrue(cluster.restartDataNode(dnProps,true));
// Busy-wait until the standby has queued the conflicting report.
while (cluster.getNamesystem(1).getBlockManager().getPendingDataNodeMessageCount() < 1) {
ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
}
assertEquals(1,cluster.getNamesystem(1).getBlockManager().getPendingDataNodeMessageCount());
String oldStorageId=getRegisteredDatanodeUid(cluster,1);
// Wipe the DN's storage and restart it so it registers with a fresh
// storage ID.
assertTrue(wipeAndRestartDn(cluster,0));
String newStorageId="";
do {
ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
newStorageId=getRegisteredDatanodeUid(cluster,1);
System.out.println("====> oldStorageId: " + oldStorageId + " newStorageId: "+ newStorageId);
}
while (newStorageId.equals(oldStorageId));
// The stale pending message must be gone, and failover must succeed.
assertEquals(0,cluster.getNamesystem(1).getBlockManager().getPendingDataNodeMessageCount());
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test the scenario where the NN fails over after issuing a block
 * synchronization request, but before it is committed. The
 * DN running the recovery should then fail to commit the synchronization
 * and a later retry will succeed.
 */
@Test(timeout=30000) public void testFailoverRightBeforeCommitSynchronization() throws Exception {
final Configuration conf=new Configuration();
// Disable permissions so the "other user" FS below can recover the lease.
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,false);
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE);
FSDataOutputStream stm=null;
// Two-NN HA topology, three DNs so block recovery has a primary to pick.
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3).build();
try {
cluster.waitActive();
cluster.transitionToActive(0);
Thread.sleep(500);
LOG.info("Starting with NN 0 active");
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
// Write half a block and hflush, leaving the file under construction.
stm=fs.create(TEST_PATH);
AppendTestUtil.write(stm,0,BLOCK_SIZE / 2);
stm.hflush();
// Locate the DN that will act as recovery primary and spy on its
// NN-facing protocol so commitBlockSynchronization can be held up.
NameNode nn0=cluster.getNameNode(0);
ExtendedBlock blk=DFSTestUtil.getFirstBlock(fs,TEST_PATH);
DatanodeDescriptor expectedPrimary=DFSTestUtil.getExpectedPrimaryNode(nn0,blk);
LOG.info("Expecting block recovery to be triggered on DN " + expectedPrimary);
DataNode primaryDN=cluster.getDataNode(expectedPrimary.getIpcPort());
DatanodeProtocolClientSideTranslatorPB nnSpy=DataNodeTestUtils.spyOnBposToNN(primaryDN,nn0);
DelayAnswer delayer=new DelayAnswer(LOG);
Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(Mockito.eq(blk),Mockito.anyInt(),Mockito.anyLong(),Mockito.eq(true),Mockito.eq(false),(DatanodeID[])Mockito.anyObject(),(String[])Mockito.anyObject());
DistributedFileSystem fsOtherUser=createFsAsOtherUser(cluster,conf);
// Recovery is asynchronous, so the first call reports "not yet done".
assertFalse(fsOtherUser.recoverLease(TEST_PATH));
LOG.info("Waiting for commitBlockSynchronization call from primary");
delayer.waitForCall();
// Fail over while the commit is delayed; the released call then lands
// on a standby and must be rejected.
LOG.info("Failing over to NN 1");
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
delayer.proceed();
delayer.waitForResult();
Throwable t=delayer.getThrown();
if (t == null) {
fail("commitBlockSynchronization call did not fail on standby");
}
GenericTestUtils.assertExceptionContains("Operation category WRITE is not supported",t);
// A retry against the new active NN must eventually succeed.
loopRecoverLease(fsOtherUser,TEST_PATH);
AppendTestUtil.check(fs,TEST_PATH,BLOCK_SIZE / 2);
}
finally {
IOUtils.closeStream(stm);
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests lease recovery if a client crashes. This approximates the
 * use case of HBase WALs being recovered after a NN failover.
 */
@Test(timeout=30000) public void testLeaseRecoveryAfterFailover() throws Exception {
  final Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(3)
      .build();
  FSDataOutputStream out = null;
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);
    Thread.sleep(500);
    LOG.info("Starting with NN 0 active");
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    // Leave the file open with 1.5 blocks flushed, like a crashed writer.
    out = fs.create(TEST_PATH);
    AppendTestUtil.write(out, 0, BLOCK_AND_A_HALF);
    out.hflush();
    LOG.info("Failing over to NN 1");
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    assertTrue(fs.exists(TEST_PATH));
    // Recover the lease as a different user, then verify the flushed data.
    FileSystem otherUserFs = createFsAsOtherUser(cluster, conf);
    loopRecoverLease(otherUserFs, TEST_PATH);
    AppendTestUtil.check(fs, TEST_PATH, BLOCK_AND_A_HALF);
    // Fail back to NN 0 and confirm the data is still intact.
    cluster.transitionToStandby(1);
    cluster.transitionToActive(0);
    AppendTestUtil.check(fs, TEST_PATH, BLOCK_AND_A_HALF);
  } finally {
    IOUtils.closeStream(out);
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier
/**
 * Test that quotas are properly tracked by the standby through
 * create, append, delete.
 */
@Test(timeout=60000) public void testQuotasTrackedOnStandby() throws Exception {
  fs.mkdirs(TEST_DIR);
  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  dfs.setQuota(TEST_DIR, NS_QUOTA, DS_QUOTA);
  // 3.5 blocks.
  long expectedSize = 3 * BLOCK_SIZE + BLOCK_SIZE / 2;
  DFSTestUtil.createFile(fs, TEST_FILE, expectedSize, (short) 1, 1L);
  // After create: the standby should see one file of expectedSize.
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  assertStandbyContentSummary(expectedSize, 1);
  // Append another 1.5 blocks to the file.
  FSDataOutputStream stm = fs.append(TEST_FILE);
  try {
    byte[] data = new byte[(int) (BLOCK_SIZE * 3 / 2)];
    stm.write(data);
    expectedSize += data.length;
  } finally {
    IOUtils.closeStream(stm);
  }
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  assertStandbyContentSummary(expectedSize, 1);
  // Delete the file: consumed space and file count drop to zero.
  fs.delete(TEST_FILE, true);
  expectedSize = 0;
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  assertStandbyContentSummary(expectedSize, 0);
}

/**
 * Fetch the content summary of TEST_DIR from the standby (nn1) and verify
 * the quotas, the consumed space, and the directory/file counts.
 *
 * @param expectedSize expected space consumed under TEST_DIR
 * @param expectedFileCount expected number of files under TEST_DIR
 */
private void assertStandbyContentSummary(long expectedSize, long expectedFileCount) throws Exception {
  ContentSummary cs = nn1.getRpcServer().getContentSummary(TEST_DIR_STR);
  assertEquals(NS_QUOTA, cs.getQuota());
  assertEquals(DS_QUOTA, cs.getSpaceQuota());
  assertEquals(expectedSize, cs.getSpaceConsumed());
  assertEquals(1, cs.getDirectoryCount());
  assertEquals(expectedFileCount, cs.getFileCount());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * 1. Run a set of operations
 * 2. Trigger the NN failover
 * 3. Check the retry cache on the original standby NN
 */
@Test(timeout=60000) public void testRetryCacheOnStandbyNN() throws Exception {
  // Run 23 cacheable operations against the active NN.
  DFSTestUtil.runOperations(cluster, dfs, conf, BlockSize, 0);
  FSNamesystem fsn0 = cluster.getNamesystem(0);
  // Use parameterized types instead of raw LightWeightCache/Map/Iterator.
  LightWeightCache<CacheEntry, CacheEntry> cacheSet =
      (LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
  assertEquals(23, cacheSet.size());
  // Remember every entry currently in the active NN's retry cache.
  Map<CacheEntry, CacheEntry> oldEntries = new HashMap<CacheEntry, CacheEntry>();
  Iterator<CacheEntry> iter = cacheSet.iterator();
  while (iter.hasNext()) {
    CacheEntry entry = iter.next();
    oldEntries.put(entry, entry);
  }
  // Ship the edits to the standby, then fail over to it.
  cluster.getNameNode(0).getRpcServer().rollEditLog();
  cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits();
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  // The new active NN must have rebuilt an identical retry cache from edits.
  FSNamesystem fsn1 = cluster.getNamesystem(1);
  cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1.getRetryCache().getCacheSet();
  assertEquals(23, cacheSet.size());
  iter = cacheSet.iterator();
  while (iter.hasNext()) {
    CacheEntry entry = iter.next();
    assertTrue(oldEntries.containsKey(entry));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Regression test for HDFS-2795:
 * - Start an HA cluster with a DN.
 * - Write several blocks to the FS with replication 1.
 * - Shutdown the DN
 * - Wait for the NNs to declare the DN dead. All blocks will be under-replicated.
 * - Restart the DN.
 * In the bug, the standby node would only very slowly notice the blocks returning
 * to the cluster.
 */
@Test(timeout=60000) public void testDatanodeRestarts() throws Exception {
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,1024);
// Allow reads on the standby so its block state can be queried directly.
HAUtil.setAllowStandbyReads(conf,true);
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,0);
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
try {
NameNode nn0=cluster.getNameNode(0);
NameNode nn1=cluster.getNameNode(1);
cluster.transitionToActive(0);
// 5 blocks of 1024 bytes, replication 1 -- all on the single DN.
DFSTestUtil.createFile(cluster.getFileSystem(0),TEST_FILE_PATH,5 * 1024,(short)1,1L);
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
// Stop the DN and force both NNs to mark it dead immediately.
DataNode dn=cluster.getDataNodes().get(0);
String dnName=dn.getDatanodeId().getXferAddr();
DataNodeProperties dnProps=cluster.stopDataNode(0);
BlockManagerTestUtil.noticeDeadDatanode(nn0,dnName);
BlockManagerTestUtil.noticeDeadDatanode(nn1,dnName);
BlockManagerTestUtil.updateState(nn0.getNamesystem().getBlockManager());
BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
// The active counts 5 under-replicated blocks; the standby counts none
// but should report the block as having no locations.
assertEquals(5,nn0.getNamesystem().getUnderReplicatedBlocks());
assertEquals(0,nn1.getNamesystem().getUnderReplicatedBlocks());
LocatedBlocks locs=nn1.getRpcServer().getBlockLocations(TEST_FILE,0,1);
assertEquals("Standby should have registered that the block has no replicas",0,locs.get(0).getLocations().length);
// Bring the DN back; both NNs should promptly see the replicas return.
cluster.restartDataNode(dnProps);
cluster.waitActive(0);
cluster.waitActive(1);
BlockManagerTestUtil.updateState(nn0.getNamesystem().getBlockManager());
BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
assertEquals(0,nn0.getNamesystem().getUnderReplicatedBlocks());
assertEquals(0,nn1.getNamesystem().getUnderReplicatedBlocks());
locs=nn1.getRpcServer().getBlockLocations(TEST_FILE,0,1);
assertEquals("Standby should have registered that the block has replicas again",1,locs.get(0).getLocations().length);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier BooleanVerifier
/**
 * Test NN checkpoint and transaction-related metrics.
 */
@Test public void testTransactionAndCheckpointMetrics() throws Exception {
// Baseline: one transaction written since startup, since the last
// checkpoint, and since the last log roll.
long lastCkptTime=MetricsAsserts.getLongGauge("LastCheckpointTime",getMetrics(NS_METRICS));
assertGauge("LastCheckpointTime",lastCkptTime,getMetrics(NS_METRICS));
assertGauge("LastWrittenTransactionId",1L,getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastCheckpoint",1L,getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastLogRoll",1L,getMetrics(NS_METRICS));
// A mkdir adds exactly one transaction to each counter.
fs.mkdirs(new Path(TEST_ROOT_DIR_PATH,"/tmp"));
assertGauge("LastCheckpointTime",lastCkptTime,getMetrics(NS_METRICS));
assertGauge("LastWrittenTransactionId",2L,getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastCheckpoint",2L,getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastLogRoll",2L,getMetrics(NS_METRICS));
// Rolling the edit log writes two more transactions (2 -> 4) but resets
// only the since-last-log-roll counter.
cluster.getNameNodeRpc().rollEditLog();
assertGauge("LastCheckpointTime",lastCkptTime,getMetrics(NS_METRICS));
assertGauge("LastWrittenTransactionId",4L,getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastCheckpoint",4L,getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastLogRoll",1L,getMetrics(NS_METRICS));
// saveNamespace (wrapped in enter/leave safemode) checkpoints: the
// checkpoint time advances and both "since" counters reset to 1.
cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER,false);
cluster.getNameNodeRpc().saveNamespace();
cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE,false);
long newLastCkptTime=MetricsAsserts.getLongGauge("LastCheckpointTime",getMetrics(NS_METRICS));
assertTrue(lastCkptTime < newLastCkptTime);
assertGauge("LastWrittenTransactionId",6L,getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastCheckpoint",1L,getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastLogRoll",1L,getMetrics(NS_METRICS));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * call DFSClient#callGetBlockLocations(...) for snapshot file. Make sure only
 * blocks within the size range are returned.
 */
@Test public void testGetBlockLocations() throws Exception {
final Path root=new Path("/");
final Path file=new Path("/file");
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,seed);
SnapshotTestHelper.createSnapshot(hdfs,root,"s1");
final Path fileInSnapshot=SnapshotTestHelper.getSnapshotPath(root,"s1",file.getName());
// The snapshot pins the file length at one block, even after the live
// file is appended to.
FileStatus status=hdfs.getFileStatus(fileInSnapshot);
assertEquals(BLOCKSIZE,status.getLen());
DFSTestUtil.appendFile(hdfs,file,BLOCKSIZE - 1);
status=hdfs.getFileStatus(fileInSnapshot);
assertEquals(BLOCKSIZE,status.getLen());
status=hdfs.getFileStatus(file);
assertEquals(BLOCKSIZE * 2 - 1,status.getLen());
// Block locations through the s1 path must only cover the single block
// that existed when s1 was taken.
LocatedBlocks blocks=DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),fileInSnapshot.toString(),0,Long.MAX_VALUE);
List blockList=blocks.getLocatedBlocks();
assertEquals(BLOCKSIZE,blocks.getFileLength());
assertEquals(1,blockList.size());
LocatedBlock lastBlock=blocks.getLastLocatedBlock();
assertEquals(0,lastBlock.getStartOffset());
assertEquals(BLOCKSIZE,lastBlock.getBlockSize());
// Take a second snapshot, then append without closing so the live file
// gains an under-construction last block.
SnapshotTestHelper.createSnapshot(hdfs,root,"s2");
final Path fileInSnapshot2=SnapshotTestHelper.getSnapshotPath(root,"s2",file.getName());
HdfsDataOutputStream out=appendFileWithoutClosing(file,BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
status=hdfs.getFileStatus(fileInSnapshot2);
assertEquals(BLOCKSIZE * 2 - 1,status.getLen());
status=hdfs.getFileStatus(file);
assertEquals(BLOCKSIZE * 3 - 1,status.getLen());
// The s2 view is complete (not under construction): 2 blocks, 2*B-1 bytes.
blocks=DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),fileInSnapshot2.toString(),0,Long.MAX_VALUE);
assertFalse(blocks.isUnderConstruction());
assertTrue(blocks.isLastBlockComplete());
blockList=blocks.getLocatedBlocks();
assertEquals(BLOCKSIZE * 2 - 1,blocks.getFileLength());
assertEquals(2,blockList.size());
lastBlock=blocks.getLastLocatedBlock();
assertEquals(BLOCKSIZE,lastBlock.getStartOffset());
assertEquals(BLOCKSIZE,lastBlock.getBlockSize());
// A range starting at BLOCKSIZE returns only the block containing it.
blocks=DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),fileInSnapshot2.toString(),BLOCKSIZE,0);
blockList=blocks.getLocatedBlocks();
assertEquals(1,blockList.size());
// The live file shows all 3 blocks with an under-construction last block.
blocks=DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),file.toString(),0,Long.MAX_VALUE);
blockList=blocks.getLocatedBlocks();
assertEquals(3,blockList.size());
assertTrue(blocks.isUnderConstruction());
assertFalse(blocks.isLastBlockComplete());
lastBlock=blocks.getLastLocatedBlock();
assertEquals(BLOCKSIZE * 2,lastBlock.getStartOffset());
assertEquals(BLOCKSIZE - 1,lastBlock.getBlockSize());
out.close();
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Test the snapshot limit of a single snapshottable directory.
 * @throws Exception
 */
@Test(timeout=300000) public void testSnapshotLimit() throws Exception {
  final int step = 1000;
  final String dirStr = "/testSnapshotLimit/dir";
  final Path dir = new Path(dirStr);
  hdfs.mkdirs(dir, new FsPermission((short) 0777));
  hdfs.allowSnapshot(dir);
  // Create exactly SNAPSHOT_LIMIT snapshots, adding a file every 'step'
  // snapshots so later snapshots contain more files than earlier ones.
  int s = 0;
  for (; s < SNAPSHOT_LIMIT; s++) {
    final String snapshotName = "s" + s;
    hdfs.createSnapshot(dir, snapshotName);
    if (s % step == 0) {
      final Path file = new Path(dirStr, "f" + s);
      DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
    }
  }
  // One snapshot beyond the limit must be rejected.
  try {
    hdfs.createSnapshot(dir, "s" + s);
    Assert.fail("Expected to fail to create snapshot, but didn't.");
  }
  catch ( IOException ioe) {
    SnapshotTestHelper.LOG.info("The exception is expected.",ioe);
  }
  // Spot-check random snapshots: file "f<f>" exists in snapshot "s<s>"
  // iff the snapshot was taken after the file was created (s > f).
  for (int f = 0; f < SNAPSHOT_LIMIT; f += step) {
    final String file = "f" + f;
    s = RANDOM.nextInt(step);
    // Bug fix: RANDOM.nextInt(step) may return 0, which previously could
    // leave 's' unchanged and spin this loop forever. Advance by at least 1.
    for (; s < SNAPSHOT_LIMIT; s += RANDOM.nextInt(step) + 1) {
      final Path p = SnapshotTestHelper.getSnapshotPath(dir, "s" + s, file);
      Assert.assertEquals(s > f, hdfs.exists(p));
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test {@link Snapshot#ID_COMPARATOR}: null sorts after any snapshot, and
 * for non-null snapshots the comparator's sign agrees with comparing the
 * roots' local names.
 */
@Test(timeout=300000) public void testIdCmp(){
  final PermissionStatus perm = PermissionStatus.createImmutable(
      "user", "group", FsPermission.createImmutable((short) 0));
  final INodeDirectory dir = new INodeDirectory(0, DFSUtil.string2Bytes("foo"), perm, 0L);
  dir.addSnapshottableFeature();
  // Duplicate ids/names on purpose so the "equal" case is exercised too.
  final Snapshot[] all = {
      new Snapshot(1, "s1", dir), new Snapshot(1, "s1", dir),
      new Snapshot(2, "s2", dir), new Snapshot(2, "s2", dir)};
  Assert.assertEquals(0, Snapshot.ID_COMPARATOR.compare(null, null));
  for (Snapshot left : all) {
    // null compares greater than any snapshot, on either side.
    Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(null, left) > 0);
    Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(left, null) < 0);
    for (Snapshot right : all) {
      final int byName = left.getRoot().getLocalName()
          .compareTo(right.getRoot().getLocalName());
      final int byId = Snapshot.ID_COMPARATOR.compare(left, right);
      // The two orderings must agree in sign for every pair.
      Assert.assertEquals(byName > 0, byId > 0);
      Assert.assertEquals(byName == 0, byId == 0);
      Assert.assertEquals(byName < 0, byId < 0);
    }
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Renaming a snapshottable directory that already has snapshots must be
 * rejected. Renaming a file out of it afterwards should leave a WithName
 * reference in the snapshot and a second reference at the destination, both
 * sharing one WithCount node.
 */
@Test(timeout=300000) public void testRenameFromSDir2NonSDir() throws Exception {
final String dirStr="/testRenameWithSnapshot";
final String abcStr=dirStr + "/abc";
final Path abc=new Path(abcStr);
hdfs.mkdirs(abc,new FsPermission((short)0777));
hdfs.allowSnapshot(abc);
final Path foo=new Path(abc,"foo");
DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(abc,"s0");
// Renaming the snapshottable dir itself must fail while snapshots exist.
try {
hdfs.rename(abc,new Path(dirStr,"tmp"));
fail("Expect exception since " + abc + " is snapshottable and already has snapshots");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains(abcStr + " is snapshottable and already has snapshots",e);
}
final String xyzStr=dirStr + "/xyz";
final Path xyz=new Path(xyzStr);
hdfs.mkdirs(xyz,new FsPermission((short)0777));
final Path bar=new Path(xyz,"bar");
// Moving the file out of the snapshottable dir converts it to references:
// the snapshot keeps a WithName node and the destination gets a second
// reference, both pointing at one shared WithCount (count == 2).
hdfs.rename(foo,bar);
final INode fooRef=fsdir.getINode(SnapshotTestHelper.getSnapshotPath(abc,"s0","foo").toString());
Assert.assertTrue(fooRef.isReference());
Assert.assertTrue(fooRef.asReference() instanceof INodeReference.WithName);
final INodeReference.WithCount withCount=(INodeReference.WithCount)fooRef.asReference().getReferredINode();
Assert.assertEquals(2,withCount.getReferenceCount());
final INode barRef=fsdir.getINode(bar.toString());
Assert.assertTrue(barRef.isReference());
Assert.assertSame(withCount,barRef.asReference().getReferredINode());
// Deleting the renamed file drops one of the two references.
hdfs.delete(bar,false);
Assert.assertEquals(1,withCount.getReferenceCount());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Make sure we clean the whole subtree under a DstReference node after
 * deleting a snapshot.
 * see HDFS-5476.
 */
@Test public void testCleanDstReference() throws Exception {
final Path test=new Path("/test");
final Path foo=new Path(test,"foo");
final Path bar=new Path(foo,"bar");
hdfs.mkdirs(bar);
SnapshotTestHelper.createSnapshot(hdfs,test,"s0");
final Path fileInBar=new Path(bar,"file");
DFSTestUtil.createFile(hdfs,fileInBar,BLOCKSIZE,REPL,SEED);
// Rename foo -> foo2 (creating a DstReference), snapshot, then delete the
// renamed subtree from the live namespace.
final Path foo2=new Path(test,"foo2");
hdfs.rename(foo,foo2);
hdfs.createSnapshot(test,"s1");
hdfs.delete(new Path(foo2,"bar"),true);
hdfs.delete(foo2,true);
// The file stays reachable through s1 until that snapshot is deleted.
final Path sfileInBar=SnapshotTestHelper.getSnapshotPath(test,"s1","foo2/bar/file");
assertTrue(hdfs.exists(sfileInBar));
hdfs.deleteSnapshot(test,"s1");
assertFalse(hdfs.exists(sfileInBar));
restartClusterAndCheckImage(true);
// After s1 is gone, bar's copy in s0 must be fully cleaned: no children
// and a single diff with empty created/deleted lists.
final Path barInS0=SnapshotTestHelper.getSnapshotPath(test,"s0","foo/bar");
INodeDirectory barNode=fsdir.getINode(barInS0.toString()).asDirectory();
assertEquals(0,barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).size());
List diffList=barNode.getDiffs().asList();
assertEquals(1,diffList.size());
DirectoryDiff diff=diffList.get(0);
assertEquals(0,diff.getChildrenDiff().getList(ListType.DELETED).size());
assertEquals(0,diff.getChildrenDiff().getList(ListType.CREATED).size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the undo section of rename. Before the rename, we create the renamed
 * file/dir before taking the snapshot.
 */
@Test public void testRenameUndo_1() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
final Path dir2file=new Path(sdir2,"file");
DFSTestUtil.createFile(hdfs,dir2file,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
// Replace dir2 with a spy whose addChild always fails, so the rename into
// it cannot complete and the rename must be undone.
INodeDirectory dir2=fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory mockDir2=spy(dir2);
doReturn(false).when(mockDir2).addChild((INode)anyObject(),anyBoolean(),Mockito.anyInt());
INodeDirectory root=fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir2,mockDir2,fsdir.getINodeMap());
final Path newfoo=new Path(sdir2,"foo");
boolean result=hdfs.rename(foo,newfoo);
assertFalse(result);
// After the undo, dir1 still holds foo and its s1 diff records no changes.
INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
ReadOnlyList dir1Children=dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir1Children.size());
assertEquals(foo.getName(),dir1Children.get(0).getLocalName());
List dir1Diffs=dir1Node.getDiffs().asList();
assertEquals(1,dir1Diffs.size());
assertEquals(s1.getId(),dir1Diffs.get(0).getSnapshotId());
ChildrenDiff childrenDiff=dir1Diffs.get(0).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(0,childrenDiff.getList(ListType.CREATED).size());
// foo keeps its snapshot feature and its s1 diff, and the snapshot path
// still resolves to the very same inode (no reference was left behind).
INode fooNode=fsdir.getINode4Write(foo.toString());
assertTrue(fooNode.isDirectory() && fooNode.asDirectory().isWithSnapshot());
List fooDiffs=fooNode.asDirectory().getDiffs().asList();
assertEquals(1,fooDiffs.size());
assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId());
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo");
INode fooNode_s1=fsdir.getINode(foo_s1.toString());
assertTrue(fooNode_s1 == fooNode);
// dir2 was untouched: no snapshot feature, still just its original file.
assertFalse(hdfs.exists(newfoo));
INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
assertFalse(dir2Node.isWithSnapshot());
ReadOnlyList dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir2Children.size());
assertEquals(dir2file.getName(),dir2Children.get(0).getLocalName());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Unit test for HDFS-4842.
 */
@Test public void testRenameDirAndDeleteSnapshot_7() throws Exception {
fsn.getSnapshotManager().setAllowNestedSnapshots(true);
final Path test=new Path("/test");
final Path dir1=new Path(test,"dir1");
final Path dir2=new Path(test,"dir2");
hdfs.mkdirs(dir1);
hdfs.mkdirs(dir2);
// Build /test/dir2/foo/bar/file, snapshot /test twice, delete the file,
// then snapshot dir2 itself (nested snapshots are enabled above).
final Path foo=new Path(dir2,"foo");
final Path bar=new Path(foo,"bar");
final Path file=new Path(bar,"file");
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,test,"s0");
SnapshotTestHelper.createSnapshot(hdfs,test,"s1");
hdfs.delete(file,true);
SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2");
// Move foo from dir2 to dir1, then delete the middle snapshot s1.
final Path newfoo=new Path(dir1,foo.getName());
hdfs.rename(foo,newfoo);
hdfs.deleteSnapshot(test,"s1");
// The file was deleted before s2 was taken, but still exists in s0.
final Path file_s2=SnapshotTestHelper.getSnapshotPath(dir2,"s2","foo/bar/file");
assertFalse(hdfs.exists(file_s2));
final Path file_s0=SnapshotTestHelper.getSnapshotPath(test,"s0","dir2/foo/bar/file");
assertTrue(hdfs.exists(file_s0));
// dir1's single diff records foo as created (nothing deleted), and the
// created entry is the same inode as the renamed foo.
INodeDirectory dir1Node=fsdir.getINode4Write(dir1.toString()).asDirectory();
List dir1DiffList=dir1Node.getDiffs().asList();
assertEquals(1,dir1DiffList.size());
List dList=dir1DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
assertTrue(dList.isEmpty());
List cList=dir1DiffList.get(0).getChildrenDiff().getList(ListType.CREATED);
assertEquals(1,cList.size());
INode cNode=cList.get(0);
INode fooNode=fsdir.getINode4Write(newfoo.toString());
assertSame(cNode,fooNode);
// bar moved along with foo; its remaining diff is associated with s0 and
// records the deleted "file".
final Path newbar=new Path(newfoo,bar.getName());
INodeDirectory barNode=fsdir.getINode4Write(newbar.toString()).asDirectory();
assertSame(fooNode.asDirectory(),barNode.getParent());
List barDiffList=barNode.getDiffs().asList();
assertEquals(1,barDiffList.size());
DirectoryDiff diff=barDiffList.get(0);
INodeDirectory testNode=fsdir.getINode4Write(test.toString()).asDirectory();
Snapshot s0=testNode.getSnapshot(DFSUtil.string2Bytes("s0"));
assertEquals(s0.getId(),diff.getSnapshotId());
assertEquals("file",diff.getChildrenDiff().getList(ListType.DELETED).get(0).getLocalName());
// dir2's diff records foo as deleted; the snapshot copy under s2 is a
// WithName reference sharing its referred inode with the renamed foo.
INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory();
List dir2DiffList=dir2Node.getDiffs().asList();
assertEquals(1,dir2DiffList.size());
dList=dir2DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
assertEquals(1,dList.size());
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(dir2,"s2",foo.getName());
INodeReference.WithName fooNode_s2=(INodeReference.WithName)fsdir.getINode(foo_s2.toString());
assertSame(dList.get(0),fooNode_s2);
assertSame(fooNode.asReference().getReferredINode(),fooNode_s2.getReferredINode());
// The fsimage must round-trip this state.
restartClusterAndCheckImage(true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the undo section of rename. Before the rename, we create the renamed
 * file/dir after taking the snapshot.
 */
@Test public void testRenameUndo_2() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path dir2file=new Path(sdir2,"file");
DFSTestUtil.createFile(hdfs,dir2file,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
// foo is created only after s1, so it belongs in s1's CREATED diff list.
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
// Replace dir2 with a spy whose addChild always fails, forcing the rename
// into it to be undone.
INodeDirectory dir2=fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory mockDir2=spy(dir2);
doReturn(false).when(mockDir2).addChild((INode)anyObject(),anyBoolean(),Mockito.anyInt());
INodeDirectory root=fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir2,mockDir2,fsdir.getINodeMap());
final Path newfoo=new Path(sdir2,"foo");
boolean result=hdfs.rename(foo,newfoo);
assertFalse(result);
// After the undo, foo is back under dir1 and still recorded in s1's
// CREATED diff as a plain directory (not a reference).
INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
ReadOnlyList dir1Children=dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir1Children.size());
assertEquals(foo.getName(),dir1Children.get(0).getLocalName());
List dir1Diffs=dir1Node.getDiffs().asList();
assertEquals(1,dir1Diffs.size());
assertEquals(s1.getId(),dir1Diffs.get(0).getSnapshotId());
ChildrenDiff childrenDiff=dir1Diffs.get(0).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(1,childrenDiff.getList(ListType.CREATED).size());
INode fooNode=fsdir.getINode4Write(foo.toString());
assertTrue(fooNode instanceof INodeDirectory);
assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
// foo did not exist when s1 was taken, so it has no snapshot path; dir2
// was untouched and keeps only its original file.
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo");
assertFalse(hdfs.exists(foo_s1));
assertFalse(hdfs.exists(newfoo));
INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
assertFalse(dir2Node.isWithSnapshot());
ReadOnlyList dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir2Children.size());
assertEquals(dir2file.getName(),dir2Children.get(0).getLocalName());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test rename a dir and a file multiple times across snapshottable
 * directories: /dir1/foo -> /dir2/foo -> /dir3/foo -> /dir2/foo -> /dir1/foo
 * Only create snapshots in the beginning (before the rename).
 */
@Test public void testRenameMoreThanOnceAcrossSnapDirs() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path sdir3=new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
// Initial layout: dir1 contains foo/bar1 and bar; snapshot all three dirs.
final Path foo_dir1=new Path(sdir1,"foo");
final Path bar1_dir1=new Path(foo_dir1,"bar1");
final Path bar2_dir1=new Path(sdir1,"bar");
DFSTestUtil.createFile(hdfs,bar1_dir1,BLOCKSIZE,REPL,SEED);
DFSTestUtil.createFile(hdfs,bar2_dir1,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s3");
// First hop: dir1 -> dir2, then lower the replication of both files.
final Path foo_dir2=new Path(sdir2,"foo");
hdfs.rename(foo_dir1,foo_dir2);
final Path bar2_dir2=new Path(sdir2,"bar");
hdfs.rename(bar2_dir1,bar2_dir2);
restartClusterAndCheckImage(true);
final Path bar1_dir2=new Path(foo_dir2,"bar1");
hdfs.setReplication(bar1_dir2,REPL_1);
hdfs.setReplication(bar2_dir2,REPL_1);
// Only s1 (taken before the rename) sees the files; s2 does not, and the
// s1 copies keep the original replication.
final Path bar1_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo/bar1");
final Path bar2_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","bar");
final Path bar1_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar1");
final Path bar2_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
FileStatus statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir2);
assertEquals(REPL_1,statusBar1.getReplication());
FileStatus statusBar2=hdfs.getFileStatus(bar2_s1);
assertEquals(REPL,statusBar2.getReplication());
statusBar2=hdfs.getFileStatus(bar2_dir2);
assertEquals(REPL_1,statusBar2.getReplication());
// Second hop: dir2 -> dir3, change replication again; still only s1 sees
// the files, and its copies remain at REPL.
final Path foo_dir3=new Path(sdir3,"foo");
hdfs.rename(foo_dir2,foo_dir3);
final Path bar2_dir3=new Path(sdir3,"bar");
hdfs.rename(bar2_dir2,bar2_dir3);
restartClusterAndCheckImage(true);
final Path bar1_dir3=new Path(foo_dir3,"bar1");
hdfs.setReplication(bar1_dir3,REPL_2);
hdfs.setReplication(bar2_dir3,REPL_2);
final Path bar1_s3=SnapshotTestHelper.getSnapshotPath(sdir3,"s3","foo/bar1");
final Path bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir3,"s3","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar1_s3));
assertFalse(hdfs.exists(bar2_s3));
statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir3);
assertEquals(REPL_2,statusBar1.getReplication());
statusBar2=hdfs.getFileStatus(bar2_s1);
assertEquals(REPL,statusBar2.getReplication());
statusBar2=hdfs.getFileStatus(bar2_dir3);
assertEquals(REPL_2,statusBar2.getReplication());
// Hop back: dir3 -> dir2, restore replication to REPL.
hdfs.rename(foo_dir3,foo_dir2);
hdfs.rename(bar2_dir3,bar2_dir2);
restartClusterAndCheckImage(true);
hdfs.setReplication(bar1_dir2,REPL);
hdfs.setReplication(bar2_dir2,REPL);
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar1_s3));
assertFalse(hdfs.exists(bar2_s3));
statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir2);
assertEquals(REPL,statusBar1.getReplication());
statusBar2=hdfs.getFileStatus(bar2_s1);
assertEquals(REPL,statusBar2.getReplication());
statusBar2=hdfs.getFileStatus(bar2_dir2);
assertEquals(REPL,statusBar2.getReplication());
// Final hop back to dir1: both inodes are now references with a WithCount
// of 2 (live copy + s1 copy), each carrying exactly one s1 diff.
hdfs.rename(foo_dir2,foo_dir1);
hdfs.rename(bar2_dir2,bar2_dir1);
INodeReference fooRef=fsdir.getINode4Write(foo_dir1.toString()).asReference();
INodeReference.WithCount fooWithCount=(WithCount)fooRef.getReferredINode();
assertEquals(2,fooWithCount.getReferenceCount());
INodeDirectory foo=fooWithCount.asDirectory();
assertEquals(1,foo.getDiffs().asList().size());
INodeDirectory sdir1Node=fsdir.getINode(sdir1.toString()).asDirectory();
Snapshot s1=sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(),foo.getDirectoryWithSnapshotFeature().getLastSnapshotId());
INodeFile bar1=fsdir.getINode4Write(bar1_dir1.toString()).asFile();
assertEquals(1,bar1.getDiffs().asList().size());
assertEquals(s1.getId(),bar1.getDiffs().getLastSnapshotId());
INodeReference barRef=fsdir.getINode4Write(bar2_dir1.toString()).asReference();
INodeReference.WithCount barWithCount=(WithCount)barRef.getReferredINode();
assertEquals(2,barWithCount.getReferenceCount());
INodeFile bar=barWithCount.asFile();
assertEquals(1,bar.getDiffs().asList().size());
assertEquals(s1.getId(),bar.getDiffs().getLastSnapshotId());
restartClusterAndCheckImage(true);
// Delete the live copies: only the s1 references remain (count drops to 1).
hdfs.delete(foo_dir1,true);
hdfs.delete(bar2_dir1,true);
restartClusterAndCheckImage(true);
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar1_s3));
assertFalse(hdfs.exists(bar2_s3));
assertFalse(hdfs.exists(foo_dir1));
assertFalse(hdfs.exists(bar1_dir1));
assertFalse(hdfs.exists(bar2_dir1));
statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar2=hdfs.getFileStatus(bar2_s1);
assertEquals(REPL,statusBar2.getReplication());
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo");
fooRef=fsdir.getINode(foo_s1.toString()).asReference();
fooWithCount=(WithCount)fooRef.getReferredINode();
assertEquals(1,fooWithCount.getReferenceCount());
barRef=fsdir.getINode(bar2_s1.toString()).asReference();
barWithCount=(WithCount)barRef.getReferredINode();
assertEquals(1,barWithCount.getReferenceCount());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the undo section of the second-time rename.
 *
 * A spied /dir3 INode is wired to refuse {@code addChild}, so the second
 * rename (foo2 -> foo3) must fail and the rename code must undo its partial
 * changes, leaving /dir2's children and snapshot diffs exactly as they were.
 * The check is then repeated after taking one more snapshot (s3) so the undo
 * is also exercised when the dst tree has a later snapshot.
 */
@Test public void testRenameUndo_3() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path sdir3=new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// Replace /dir3's inode with a spy whose addChild always returns false,
// forcing any rename INTO /dir3 to fail partway and trigger the undo path.
INodeDirectory dir3=fsdir.getINode4Write(sdir3.toString()).asDirectory();
INodeDirectory mockDir3=spy(dir3);
doReturn(false).when(mockDir3).addChild((INode)anyObject(),anyBoolean(),Mockito.anyInt());
INodeDirectory root=fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir3,mockDir3,fsdir.getINodeMap());
final Path foo_dir2=new Path(sdir2,"foo2");
final Path foo_dir3=new Path(sdir3,"foo3");
// First rename (into /dir2) succeeds; the second (into /dir3) must fail.
hdfs.rename(foo,foo_dir2);
boolean result=hdfs.rename(foo_dir2,foo_dir3);
assertFalse(result);
INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
Snapshot s2=dir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
// After the undo, /dir2 must still contain exactly one child (foo2) and a
// single diff for s2 recording foo2 as created (nothing deleted).
ReadOnlyList dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir2Children.size());
List dir2Diffs=dir2Node.getDiffs().asList();
assertEquals(1,dir2Diffs.size());
assertEquals(s2.getId(),dir2Diffs.get(0).getSnapshotId());
ChildrenDiff childrenDiff=dir2Diffs.get(0).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(1,childrenDiff.getList(ListType.CREATED).size());
// foo2 is not visible in snapshot s2 since it arrived after s2 was taken.
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo2");
assertFalse(hdfs.exists(foo_s2));
INode fooNode=fsdir.getINode4Write(foo_dir2.toString());
// The created-list entry must be the very same inode as foo2 (a
// DstReference, because foo2 is the destination of a cross-snapshot rename).
assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
assertTrue(fooNode instanceof INodeReference.DstReference);
List fooDiffs=fooNode.asDirectory().getDiffs().asList();
assertEquals(1,fooDiffs.size());
assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId());
// Take s3 on /dir2, then retry the failing rename: the undo must now also
// leave the s3-era diff untouched (empty created/deleted lists).
hdfs.createSnapshot(sdir2,"s3");
result=hdfs.rename(foo_dir2,foo_dir3);
assertFalse(result);
dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
Snapshot s3=dir2Node.getSnapshot(DFSUtil.string2Bytes("s3"));
fooNode=fsdir.getINode4Write(foo_dir2.toString());
dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir2Children.size());
dir2Diffs=dir2Node.getDiffs().asList();
assertEquals(2,dir2Diffs.size());
assertEquals(s2.getId(),dir2Diffs.get(0).getSnapshotId());
assertEquals(s3.getId(),dir2Diffs.get(1).getSnapshotId());
// The s2 diff still records foo2's creation ...
childrenDiff=dir2Diffs.get(0).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(1,childrenDiff.getList(ListType.CREATED).size());
assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
// ... while the s3 diff remains completely empty after the undo.
childrenDiff=dir2Diffs.get(1).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(0,childrenDiff.getList(ListType.CREATED).size());
// foo2 exists in s3 (taken after the first rename) but not in s2.
final Path foo_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo2");
assertFalse(hdfs.exists(foo_s2));
assertTrue(hdfs.exists(foo_s3));
assertTrue(fooNode instanceof INodeReference.DstReference);
// foo now carries diffs for both s1 (pre-rename) and s3 (post-rename).
fooDiffs=fooNode.asDirectory().getDiffs().asList();
assertEquals(2,fooDiffs.size());
assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId());
assertEquals(s3.getId(),fooDiffs.get(1).getSnapshotId());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
 * again -> delete snapshot s on dst tree
 * Make sure we only delete the snapshot s under the renamed dir.
 *
 * Verifies namespace quota accounting on both trees, the reference count of
 * the shared WithCount node, and that files created between the two renames
 * (bar2, bar3) survive the deletion of snapshot s3.
 */
@Test public void testRenameDirAndDeleteSnapshot_4() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// Rename /dir1/foo -> /dir2/foo, add two new files under it, snapshot the
// dst tree (s3), rename it back, then delete s3.
final Path foo2=new Path(sdir2,"foo");
hdfs.rename(foo,foo2);
final Path bar2=new Path(foo2,"bar2");
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
final Path bar3=new Path(foo2,"bar3");
DFSTestUtil.createFile(hdfs,bar3,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(sdir2,"s3");
hdfs.rename(foo2,foo);
hdfs.deleteSnapshot(sdir2,"s3");
// /dir1 owns foo's whole subtree again: dir1 + foo + bar/bar2/bar3 plus
// snapshot-related objects => namespace usage of 9.
final INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Quota.Counts q1=dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(9,q1.get(Quota.NAMESPACE));
// /dir2 is back to just itself plus its snapshot dir => 2.
final INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
Quota.Counts q2=dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(2,q2.get(Quota.NAMESPACE));
// foo as seen through s1 is a WithName reference; the underlying WithCount
// is shared by the s1 view and the current-state DstReference => count 2.
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1",foo.getName());
final INode fooRef=fsdir.getINode(foo_s1.toString());
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount wc=(WithCount)fooRef.asReference().getReferredINode();
assertEquals(2,wc.getReferenceCount());
// All three children (bar created before the renames; bar2/bar3 created
// in-between) must still be present in the current state.
INodeDirectory fooNode=wc.getReferredINode().asDirectory();
ReadOnlyList children=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(3,children.size());
assertEquals(bar.getName(),children.get(0).getLocalName());
assertEquals(bar2.getName(),children.get(1).getLocalName());
assertEquals(bar3.getName(),children.get(2).getLocalName());
// Only the s1 diff remains after deleting s3; it records bar2 and bar3 as
// created relative to s1, with nothing deleted.
List diffList=fooNode.getDiffs().asList();
assertEquals(1,diffList.size());
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(),diffList.get(0).getSnapshotId());
ChildrenDiff diff=diffList.get(0).getChildrenDiff();
assertEquals(2,diff.getList(ListType.CREATED).size());
assertEquals(0,diff.getList(ListType.DELETED).size());
// The current-state foo is a DstReference sharing the same WithCount, and
// the WithCount's parent reference points back at it.
final INode fooRef2=fsdir.getINode4Write(foo.toString());
assertTrue(fooRef2 instanceof INodeReference.DstReference);
INodeReference.WithCount wc2=(WithCount)fooRef2.asReference().getReferredINode();
assertSame(wc,wc2);
assertSame(fooRef2,wc.getParentReference());
// Finally verify the state survives a save/restart round trip.
restartClusterAndCheckImage(true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test rename a dir multiple times across snapshottable directories:
 * /dir1/foo -> /dir2/foo -> /dir3/foo -> /dir2/foo -> /dir1/foo
 * Create snapshots after each rename.
 *
 * Also renames a plain file (/dir1/bar) along the same route, changing the
 * replication factor at each hop, and verifies that every snapshot preserves
 * the replication it observed, that diff lists and WithCount reference counts
 * match the rename history, and that deleting the current-state copies does
 * not disturb the older snapshots.
 */
@Test public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path sdir3=new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
// Initial layout: /dir1/foo/bar1 (file) and /dir1/bar (file).
final Path foo_dir1=new Path(sdir1,"foo");
final Path bar1_dir1=new Path(foo_dir1,"bar1");
final Path bar_dir1=new Path(sdir1,"bar");
DFSTestUtil.createFile(hdfs,bar1_dir1,BLOCKSIZE,REPL,SEED);
DFSTestUtil.createFile(hdfs,bar_dir1,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s3");
// Hop 1: dir1 -> dir2, then set replication to REPL_1.
final Path foo_dir2=new Path(sdir2,"foo");
hdfs.rename(foo_dir1,foo_dir2);
final Path bar_dir2=new Path(sdir2,"bar");
hdfs.rename(bar_dir1,bar_dir2);
final Path bar1_dir2=new Path(foo_dir2,"bar1");
hdfs.setReplication(bar1_dir2,REPL_1);
hdfs.setReplication(bar_dir2,REPL_1);
restartClusterAndCheckImage(true);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s11");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s22");
SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s33");
// Hop 2: dir2 -> dir3, then set replication to REPL_2.
final Path foo_dir3=new Path(sdir3,"foo");
hdfs.rename(foo_dir2,foo_dir3);
final Path bar_dir3=new Path(sdir3,"bar");
hdfs.rename(bar_dir2,bar_dir3);
final Path bar1_dir3=new Path(foo_dir3,"bar1");
hdfs.setReplication(bar1_dir3,REPL_2);
hdfs.setReplication(bar_dir3,REPL_2);
restartClusterAndCheckImage(true);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s111");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s222");
SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s333");
// Each snapshot must show the files with the replication in effect when
// that snapshot was taken: s1 -> REPL, s22 -> REPL_1, s333 -> REPL_2.
final Path bar1_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo/bar1");
final Path bar1_s22=SnapshotTestHelper.getSnapshotPath(sdir2,"s22","foo/bar1");
final Path bar1_s333=SnapshotTestHelper.getSnapshotPath(sdir3,"s333","foo/bar1");
final Path bar_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","bar");
final Path bar_s22=SnapshotTestHelper.getSnapshotPath(sdir2,"s22","bar");
final Path bar_s333=SnapshotTestHelper.getSnapshotPath(sdir3,"s333","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar1_s22));
assertTrue(hdfs.exists(bar1_s333));
assertTrue(hdfs.exists(bar_s1));
assertTrue(hdfs.exists(bar_s22));
assertTrue(hdfs.exists(bar_s333));
FileStatus statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir3);
assertEquals(REPL_2,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s22);
assertEquals(REPL_1,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s333);
assertEquals(REPL_2,statusBar1.getReplication());
FileStatus statusBar=hdfs.getFileStatus(bar_s1);
assertEquals(REPL,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_dir3);
assertEquals(REPL_2,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s22);
assertEquals(REPL_1,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s333);
assertEquals(REPL_2,statusBar.getReplication());
// Hop 3 (back): dir3 -> dir2, restore replication to REPL.
hdfs.rename(foo_dir3,foo_dir2);
hdfs.rename(bar_dir3,bar_dir2);
hdfs.setReplication(bar1_dir2,REPL);
hdfs.setReplication(bar_dir2,REPL);
restartClusterAndCheckImage(true);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1111");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2222");
// Old snapshots remain intact, and the new s2222 sees REPL again.
final Path bar1_s2222=SnapshotTestHelper.getSnapshotPath(sdir2,"s2222","foo/bar1");
final Path bar_s2222=SnapshotTestHelper.getSnapshotPath(sdir2,"s2222","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar1_s22));
assertTrue(hdfs.exists(bar1_s333));
assertTrue(hdfs.exists(bar1_s2222));
assertTrue(hdfs.exists(bar_s1));
assertTrue(hdfs.exists(bar_s22));
assertTrue(hdfs.exists(bar_s333));
assertTrue(hdfs.exists(bar_s2222));
statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir2);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s22);
assertEquals(REPL_1,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s333);
assertEquals(REPL_2,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s2222);
assertEquals(REPL,statusBar1.getReplication());
statusBar=hdfs.getFileStatus(bar_s1);
assertEquals(REPL,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_dir2);
assertEquals(REPL,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s22);
assertEquals(REPL_1,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s333);
assertEquals(REPL_2,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s2222);
assertEquals(REPL,statusBar.getReplication());
// Hop 4 (back home): dir2 -> dir1.
hdfs.rename(foo_dir2,foo_dir1);
hdfs.rename(bar_dir2,bar_dir1);
INodeDirectory sdir1Node=fsdir.getINode(sdir1.toString()).asDirectory();
INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory();
INodeDirectory sdir3Node=fsdir.getINode(sdir3.toString()).asDirectory();
// After 4 renames, the current-state reference plus 4 snapshot-era
// WithName references share one WithCount => reference count 5.
INodeReference fooRef=fsdir.getINode4Write(foo_dir1.toString()).asReference();
INodeReference.WithCount fooWithCount=(WithCount)fooRef.getReferredINode();
assertEquals(5,fooWithCount.getReferenceCount());
// foo accumulated one directory diff per snapshot that captured it,
// ordered oldest-first: s1, s22, s333, s2222.
INodeDirectory foo=fooWithCount.asDirectory();
List fooDiffs=foo.getDiffs().asList();
assertEquals(4,fooDiffs.size());
Snapshot s2222=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2222"));
Snapshot s333=sdir3Node.getSnapshot(DFSUtil.string2Bytes("s333"));
Snapshot s22=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s22"));
Snapshot s1=sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s2222.getId(),fooDiffs.get(3).getSnapshotId());
assertEquals(s333.getId(),fooDiffs.get(2).getSnapshotId());
assertEquals(s22.getId(),fooDiffs.get(1).getSnapshotId());
assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId());
// bar1 only changed replication on hops 1 and 2, so it has three file
// diffs (s1, s22, s333) — none for s2222 since REPL was already restored.
INodeFile bar1=fsdir.getINode4Write(bar1_dir1.toString()).asFile();
List bar1Diffs=bar1.getDiffs().asList();
assertEquals(3,bar1Diffs.size());
assertEquals(s333.getId(),bar1Diffs.get(2).getSnapshotId());
assertEquals(s22.getId(),bar1Diffs.get(1).getSnapshotId());
assertEquals(s1.getId(),bar1Diffs.get(0).getSnapshotId());
// bar travelled the same route as foo: 5 references, 4 diffs.
INodeReference barRef=fsdir.getINode4Write(bar_dir1.toString()).asReference();
INodeReference.WithCount barWithCount=(WithCount)barRef.getReferredINode();
assertEquals(5,barWithCount.getReferenceCount());
INodeFile bar=barWithCount.asFile();
List barDiffs=bar.getDiffs().asList();
assertEquals(4,barDiffs.size());
assertEquals(s2222.getId(),barDiffs.get(3).getSnapshotId());
assertEquals(s333.getId(),barDiffs.get(2).getSnapshotId());
assertEquals(s22.getId(),barDiffs.get(1).getSnapshotId());
assertEquals(s1.getId(),barDiffs.get(0).getSnapshotId());
restartClusterAndCheckImage(true);
// Delete the current-state copies: snapshot views must survive; only the
// reference counts drop by one (the DstReference is released).
hdfs.delete(foo_dir1,true);
hdfs.delete(bar_dir1,true);
restartClusterAndCheckImage(true);
final Path bar1_s1111=SnapshotTestHelper.getSnapshotPath(sdir1,"s1111","foo/bar1");
final Path bar_s1111=SnapshotTestHelper.getSnapshotPath(sdir1,"s1111","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar1_s22));
assertTrue(hdfs.exists(bar1_s333));
assertTrue(hdfs.exists(bar1_s2222));
// s1111 was taken on /dir1 while foo/bar lived in /dir2, so it never
// captured them.
assertFalse(hdfs.exists(bar1_s1111));
assertTrue(hdfs.exists(bar_s1));
assertTrue(hdfs.exists(bar_s22));
assertTrue(hdfs.exists(bar_s333));
assertTrue(hdfs.exists(bar_s2222));
assertFalse(hdfs.exists(bar_s1111));
final Path foo_s2222=SnapshotTestHelper.getSnapshotPath(sdir2,"s2222","foo");
fooRef=fsdir.getINode(foo_s2222.toString()).asReference();
fooWithCount=(WithCount)fooRef.getReferredINode();
assertEquals(4,fooWithCount.getReferenceCount());
foo=fooWithCount.asDirectory();
fooDiffs=foo.getDiffs().asList();
assertEquals(4,fooDiffs.size());
assertEquals(s2222.getId(),fooDiffs.get(3).getSnapshotId());
bar1Diffs=bar1.getDiffs().asList();
assertEquals(3,bar1Diffs.size());
assertEquals(s333.getId(),bar1Diffs.get(2).getSnapshotId());
barRef=fsdir.getINode(bar_s2222.toString()).asReference();
barWithCount=(WithCount)barRef.getReferredINode();
assertEquals(4,barWithCount.getReferenceCount());
bar=barWithCount.asFile();
barDiffs=bar.getDiffs().asList();
assertEquals(4,barDiffs.size());
assertEquals(s2222.getId(),barDiffs.get(3).getSnapshotId());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Test rename while the rename operation will exceed the quota in the dst
 * tree.
 *
 * /test/dir2 gets a namespace quota of 5 that the rename of /test/dir1/foo
 * into /test/dir2/subdir2 would exceed; the rename must return false and the
 * undo must leave both trees exactly as before (children, parents, diff
 * lists, and quota usage all unchanged).
 */
@Test public void testRenameUndo_5() throws Exception {
final Path test=new Path("/test");
final Path dir1=new Path(test,"dir1");
final Path dir2=new Path(test,"dir2");
final Path subdir2=new Path(dir2,"subdir2");
hdfs.mkdirs(dir1);
hdfs.mkdirs(subdir2);
final Path foo=new Path(dir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,dir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2");
// Namespace quota of 5 on dir2: moving foo (dir + file) under subdir2
// would push the count past the limit, so the rename must fail.
hdfs.setQuota(dir2,5,Long.MAX_VALUE - 1);
final Path foo2=new Path(subdir2,foo.getName());
boolean rename=hdfs.rename(foo,foo2);
assertFalse(rename);
// The source tree is untouched: foo and bar still exist under dir1.
assertTrue(hdfs.exists(foo));
assertTrue(hdfs.exists(bar));
INodeDirectory dir1Node=fsdir.getINode4Write(dir1.toString()).asDirectory();
List childrenList=ReadOnlyList.Util.asList(dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1,childrenList.size());
INode fooNode=childrenList.get(0);
// foo was captured by s1, so it carries the with-snapshot feature; bar is
// a plain INodeFile (never converted to a reference) parented under foo.
assertTrue(fooNode.asDirectory().isWithSnapshot());
INode barNode=fsdir.getINode4Write(bar.toString());
assertTrue(barNode.getClass() == INodeFile.class);
assertSame(fooNode,barNode.getParent());
// dir1's single s1 diff must not have recorded any create/delete from the
// failed rename attempt.
List diffList=dir1Node.getDiffs().asList();
assertEquals(1,diffList.size());
DirectoryDiff diff=diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
// dir2's quota usage stays at 3 (dir2 + subdir2 + snapshot accounting);
// nothing leaked from the aborted rename.
INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory();
assertTrue(dir2Node.isSnapshottable());
Quota.Counts counts=dir2Node.computeQuotaUsage();
assertEquals(3,counts.get(Quota.NAMESPACE));
assertEquals(0,counts.get(Quota.DISKSPACE));
childrenList=ReadOnlyList.Util.asList(dir2Node.asDirectory().getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1,childrenList.size());
INode subdir2Node=childrenList.get(0);
assertSame(dir2Node,subdir2Node.getParent());
assertSame(subdir2Node,fsdir.getINode4Write(subdir2.toString()));
// dir2's s2 diff is likewise empty.
diffList=dir2Node.getDiffs().asList();
assertEquals(1,diffList.size());
diff=diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
 * -> delete snapshot s on dst tree
 * Make sure we destroy everything created after the rename under the renamed
 * dir.
 *
 * bar2 and bar3 are created after the rename and only captured by s3; once
 * foo2 is deleted and s3 is removed, they must be fully destroyed, leaving
 * only the pre-rename state visible through s1.
 */
@Test public void testRenameDirAndDeleteSnapshot_3() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// Rename foo into /dir2, create two new files under it, snapshot (s3),
// then delete both the renamed dir and s3.
final Path foo2=new Path(sdir2,"foo");
hdfs.rename(foo,foo2);
final Path bar2=new Path(foo2,"bar2");
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
final Path bar3=new Path(foo2,"bar3");
DFSTestUtil.createFile(hdfs,bar3,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(sdir2,"s3");
hdfs.delete(foo2,true);
hdfs.deleteSnapshot(sdir2,"s3");
// dir1 still accounts for the s1 view of foo/bar => namespace usage 4.
final INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Quota.Counts q1=dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(4,q1.get(Quota.NAMESPACE));
// dir2 keeps nothing of foo: bar2/bar3 were destroyed with s3 => usage 2.
final INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
Quota.Counts q2=dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(2,q2.get(Quota.NAMESPACE));
// Only the s1 WithName reference survives => reference count of 1.
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1",foo.getName());
INode fooRef=fsdir.getINode(foo_s1.toString());
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount wc=(WithCount)fooRef.asReference().getReferredINode();
assertEquals(1,wc.getReferenceCount());
// foo's only remaining child is the original bar; bar2/bar3 are gone.
INodeDirectory fooNode=wc.getReferredINode().asDirectory();
ReadOnlyList children=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,children.size());
assertEquals(bar.getName(),children.get(0).getLocalName());
// A single empty s1 diff remains — no trace of post-rename churn.
List diffList=fooNode.getDiffs().asList();
assertEquals(1,diffList.size());
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(),diffList.get(0).getSnapshotId());
ChildrenDiff diff=diffList.get(0).getChildrenDiff();
assertEquals(0,diff.getList(ListType.CREATED).size());
assertEquals(0,diff.getList(ListType.DELETED).size());
restartClusterAndCheckImage(true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Rename and deletion snapshot under the same the snapshottable directory.
 *
 * /test is snapshottable; a dir is renamed within it (dir2/foo -> dir1/foo)
 * after a file deletion, then snapshot s0 is removed. All snapshot diffs on
 * the affected inodes must become empty once s0 is gone.
 */
@Test public void testRenameDirAndDeleteSnapshot_6() throws Exception {
final Path test=new Path("/test");
final Path dir1=new Path(test,"dir1");
final Path dir2=new Path(test,"dir2");
hdfs.mkdirs(dir1);
hdfs.mkdirs(dir2);
final Path foo=new Path(dir2,"foo");
final Path bar=new Path(foo,"bar");
final Path file=new Path(bar,"file");
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPL,SEED);
// Snapshot the whole /test tree, then delete the file and move foo from
// dir2 to dir1 — both changes recorded against s0.
SnapshotTestHelper.createSnapshot(hdfs,test,"s0");
hdfs.delete(file,true);
final Path newfoo=new Path(dir1,foo.getName());
hdfs.rename(foo,newfoo);
final Path foo_s0=SnapshotTestHelper.getSnapshotPath(test,"s0","dir2/foo");
assertTrue("the snapshot path " + foo_s0 + " should exist",hdfs.exists(foo_s0));
// Deleting s0 must drop the snapshot view and all the diffs it pinned.
hdfs.deleteSnapshot(test,"s0");
assertFalse("after deleting s0, " + foo_s0 + " should not exist",hdfs.exists(foo_s0));
INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory();
assertTrue("the diff list of " + dir2 + " should be empty after deleting s0",dir2Node.getDiffs().asList().isEmpty());
// The renamed dir remains a DstReference with the with-snapshot feature,
// but carries no diffs now that s0 is gone.
assertTrue(hdfs.exists(newfoo));
INode fooRefNode=fsdir.getINode4Write(newfoo.toString());
assertTrue(fooRefNode instanceof INodeReference.DstReference);
INodeDirectory fooNode=fooRefNode.asDirectory();
assertTrue(fooNode.isWithSnapshot());
assertTrue(fooNode.getDiffs().asList().isEmpty());
// bar lost its only child (file) and has no diffs either.
INodeDirectory barNode=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID).get(0).asDirectory();
assertTrue(barNode.getDiffs().asList().isEmpty());
assertTrue(barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
restartClusterAndCheckImage(true);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Test the rename undo when removing dst node fails
 *
 * A namespace quota of 4 on dir2 makes the snapshot-diff recording for the
 * OVERWRITE rename exceed quota; a QuotaExceededException must surface and
 * the undo must restore both trees (children, parents, and empty diffs).
 */
@Test public void testRenameUndo_6() throws Exception {
final Path test=new Path("/test");
final Path dir1=new Path(test,"dir1");
final Path dir2=new Path(test,"dir2");
final Path sub_dir2=new Path(dir2,"subdir");
final Path subsub_dir2=new Path(sub_dir2,"subdir");
hdfs.mkdirs(dir1);
hdfs.mkdirs(subsub_dir2);
final Path foo=new Path(dir1,"foo");
hdfs.mkdirs(foo);
SnapshotTestHelper.createSnapshot(hdfs,dir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2");
// Quota of 4 on dir2: the OVERWRITE rename of foo onto subsub_dir2 must
// fail while recording the modification for snapshot s2.
hdfs.setQuota(dir2,4,Long.MAX_VALUE - 1);
try {
hdfs.rename(foo,subsub_dir2,Rename.OVERWRITE);
fail("Expect QuotaExceedException");
}
 catch ( QuotaExceededException e) {
// Pin the exact failure mode: quota is hit during snapshot-diff recording.
String msg="Failed to record modification for snapshot: " + "The NameSpace quota (directories and files)" + " is exceeded: quota=4 file count=5";
GenericTestUtils.assertExceptionContains(msg,e);
}
// Source tree restored: foo is still dir1's only child, with the
// with-snapshot feature from s1 but an empty diff.
assertTrue(hdfs.exists(foo));
INodeDirectory dir1Node=fsdir.getINode4Write(dir1.toString()).asDirectory();
List childrenList=ReadOnlyList.Util.asList(dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1,childrenList.size());
INode fooNode=childrenList.get(0);
assertTrue(fooNode.asDirectory().isWithSnapshot());
assertSame(dir1Node,fooNode.getParent());
List diffList=dir1Node.getDiffs().asList();
assertEquals(1,diffList.size());
DirectoryDiff diff=diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
// Destination tree restored: quota usage back to 4 and the
// dir2/subdir/subdir chain intact with original parent links.
INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory();
assertTrue(dir2Node.isSnapshottable());
Quota.Counts counts=dir2Node.computeQuotaUsage();
assertEquals(4,counts.get(Quota.NAMESPACE));
assertEquals(0,counts.get(Quota.DISKSPACE));
childrenList=ReadOnlyList.Util.asList(dir2Node.asDirectory().getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1,childrenList.size());
INode subdir2Node=childrenList.get(0);
assertTrue(subdir2Node.asDirectory().isWithSnapshot());
assertSame(dir2Node,subdir2Node.getParent());
assertSame(subdir2Node,fsdir.getINode4Write(sub_dir2.toString()));
// The inner subdir was never converted to a reference type.
INode subsubdir2Node=fsdir.getINode4Write(subsub_dir2.toString());
assertTrue(subsubdir2Node.getClass() == INodeDirectory.class);
assertSame(subdir2Node,subsubdir2Node.getParent());
// dir2's s2 diff is empty, and sub_dir2 accumulated no diffs at all.
diffList=(dir2Node).getDiffs().asList();
assertEquals(1,diffList.size());
diff=diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
diffList=subdir2Node.asDirectory().getDiffs().asList();
assertEquals(0,diffList.size());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Test rename to an invalid name (xxx/.snapshot)
 *
 * Renaming onto a path ending in the reserved ".snapshot" component must be
 * rejected, and the failed attempt must leave the namespace — children, diff
 * lists, and parent links — completely unchanged. Also verifies the state
 * survives a saveNamespace + cluster restart.
 */
@Test public void testRenameUndo_7() throws Exception {
final Path root=new Path("/");
final Path foo=new Path(root,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,root,snap1);
// ".snapshot" is a reserved name — renaming onto /foo/.snapshot must fail.
final Path invalid=new Path(foo,HdfsConstants.DOT_SNAPSHOT_DIR);
try {
hdfs.rename(bar,invalid);
fail("expect exception since invalid name is used for rename");
}
 catch ( Exception e) {
GenericTestUtils.assertExceptionContains("\"" + HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name",e);
}
// foo still holds exactly one child with a single, empty diff for snap1.
INodeDirectory rootNode=fsdir.getINode4Write(root.toString()).asDirectory();
INodeDirectory fooNode=fsdir.getINode4Write(foo.toString()).asDirectory();
ReadOnlyList children=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,children.size());
List diffList=fooNode.getDiffs().asList();
assertEquals(1,diffList.size());
DirectoryDiff diff=diffList.get(0);
Snapshot s1=rootNode.getSnapshot(DFSUtil.string2Bytes(snap1));
assertEquals(s1.getId(),diff.getSnapshotId());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
// bar is untouched: same inode instance as foo's child, correct parent,
// and one file diff pinned to snap1.
INodeFile barNode=fsdir.getINode4Write(bar.toString()).asFile();
assertSame(barNode,children.get(0));
assertSame(fooNode,barNode.getParent());
List barDiffList=barNode.getDiffs().asList();
assertEquals(1,barDiffList.size());
FileDiff barDiff=barDiffList.get(0);
assertEquals(s1.getId(),barDiff.getSnapshotId());
// Persist the namespace and restart from the saved image to confirm the
// on-disk representation is consistent as well.
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPL).build();
cluster.waitActive();
restartClusterAndCheckImage(true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Rename a single file across snapshottable dirs.
 *
 * The file starts under /dir2 (captured by snapshot s2), is moved into
 * /dir1 after s3 is taken there, and then has its replication changed.
 * s2 must keep the old replication, s3 must not see the file at all, and
 * the file's latest diff must belong to s2.
 */
@Test(timeout=60000) public void testRenameFileAcrossSnapshottableDirs() throws Exception {
// Two snapshottable roots; the file initially lives under /dir2.
final Path dirA = new Path("/dir1");
final Path dirB = new Path("/dir2");
hdfs.mkdirs(dirA);
hdfs.mkdirs(dirB);
final Path srcFile = new Path(dirB, "foo");
DFSTestUtil.createFile(hdfs, srcFile, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, dirA, "s1");
SnapshotTestHelper.createSnapshot(hdfs, dirB, "s2");
hdfs.createSnapshot(dirA, "s3");
// Move the file into /dir1, then lower its replication.
final Path dstFile = new Path(dirA, "foo");
hdfs.rename(srcFile, dstFile);
hdfs.setReplication(dstFile, REPL_1);
// s2 was taken before the rename and the replication change, so it still
// shows the file with the original replication factor.
final Path fileInS2 = SnapshotTestHelper.getSnapshotPath(dirB, "s2", "foo");
assertTrue(hdfs.exists(fileInS2));
FileStatus snapStatus = hdfs.getFileStatus(fileInS2);
assertEquals(REPL, snapStatus.getReplication());
// s3 was taken on /dir1 before the file arrived, so it must not see it.
final Path fileInS3 = SnapshotTestHelper.getSnapshotPath(dirA, "s3", "foo");
assertFalse(hdfs.exists(fileInS3));
// The most recent file diff must be associated with snapshot s2.
INodeDirectory dirBNode = fsdir.getINode(dirB.toString()).asDirectory();
Snapshot snapS2 = dirBNode.getSnapshot(DFSUtil.string2Bytes("s2"));
INodeFile movedFile = fsdir.getINode(dstFile.toString()).asFile();
assertEquals(snapS2.getId(), movedFile.getDiffs().getLastSnapshotId());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * After the following steps:
 *
 * 1. Take snapshot s1 on /dir1 at time t1.
 * 2. Take snapshot s2 on /dir2 at time t2.
 * 3. Modify the subtree of /dir2/foo/ to make it a dir with snapshots.
 * 4. Take snapshot s3 on /dir1 at time t3.
 * 5. Rename /dir2/foo/ to /dir1/foo/.
 *
 * When changes happening on foo, the diff should be recorded in snapshot s2.
 */
@Test(timeout=60000) public void testRenameDirAcrossSnapshottableDirs() throws Exception {
final Path srcRoot = new Path("/dir1");
final Path dstRoot = new Path("/dir2");
hdfs.mkdirs(srcRoot);
hdfs.mkdirs(dstRoot);
// /dir2/foo holds two files, bar and bar2.
final Path fooDir = new Path(dstRoot, "foo");
final Path barFile = new Path(fooDir, "bar");
final Path bar2File = new Path(fooDir, "bar2");
DFSTestUtil.createFile(hdfs, barFile, BLOCKSIZE, REPL, SEED);
DFSTestUtil.createFile(hdfs, bar2File, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, srcRoot, "s1");
SnapshotTestHelper.createSnapshot(hdfs, dstRoot, "s2");
// Modify foo's subtree after s2: change bar2's replication, delete bar.
hdfs.setReplication(bar2File, REPL_1);
hdfs.delete(barFile, true);
hdfs.createSnapshot(srcRoot, "s3");
// Move foo from /dir2 into /dir1.
final Path movedFoo = new Path(srcRoot, "foo");
hdfs.rename(fooDir, movedFoo);
// bar was deleted after s2, so s2 still shows it.
final Path barInS2 = SnapshotTestHelper.getSnapshotPath(dstRoot, "s2", "foo/bar");
assertTrue(hdfs.exists(barInS2));
// bar2 travelled with foo; delete its current-state copy.
final Path movedBar2 = new Path(movedFoo, "bar2");
assertTrue(hdfs.exists(movedBar2));
hdfs.delete(movedBar2, true);
// s2's view of bar2 survives with the pre-change replication factor.
final Path bar2InS2 = SnapshotTestHelper.getSnapshotPath(dstRoot, "s2", "foo/bar2");
assertTrue(hdfs.exists(bar2InS2));
FileStatus bar2Status = hdfs.getFileStatus(bar2InS2);
assertEquals(REPL, bar2Status.getReplication());
// s3 was taken on /dir1 before the rename, so it never captured foo/bar2.
final Path bar2InS3 = SnapshotTestHelper.getSnapshotPath(srcRoot, "s3", "foo/bar2");
assertFalse(hdfs.exists(bar2InS3));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * After rename, delete the snapshot in src
 *
 * foo is renamed from /dir2 into /dir1, modified, and snapshotted there
 * (s4). Snapshots are then deleted one by one — s4, then the src-side s3
 * and s2, then s1 — verifying at each step which snapshot paths remain
 * visible, the reference structure of foo's WithCount, and finally that the
 * root's namespace quota usage drops as the snapshot data is released.
 */
@Test public void testRenameDirAndDeleteSnapshot_2() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir2,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s3");
// Move foo into /dir1, add bar2, snapshot (s4), then delete foo entirely.
final Path newfoo=new Path(sdir1,"foo");
hdfs.rename(foo,newfoo);
restartClusterAndCheckImage(true);
final Path bar2=new Path(newfoo,"bar2");
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(sdir1,"s4");
hdfs.delete(newfoo,true);
// s4 captured foo right before the delete, so both files are visible
// through it.
final Path bar2_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar2");
assertTrue(hdfs.exists(bar2_s4));
final Path bar_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar");
assertTrue(hdfs.exists(bar_s4));
hdfs.deleteSnapshot(sdir1,"s4");
restartClusterAndCheckImage(true);
// bar is only reachable through /dir2's snapshots (foo lived there when
// s3 was taken); /dir1's s3 does not exist for it.
Path bar_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar");
assertFalse(hdfs.exists(bar_s3));
bar_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar");
assertTrue(hdfs.exists(bar_s3));
// bar2 was created after the rename, so no s3 (on either tree) has it.
Path bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
hdfs.deleteSnapshot(sdir2,"s3");
// With s3 gone, s2 is the last snapshot holding foo: a single WithName
// reference (count 1) with one diff pinned to s2.
final Path bar_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar");
assertTrue(hdfs.exists(bar_s2));
INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory();
Snapshot s2=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo");
INodeReference fooRef=fsdir.getINode(foo_s2.toString()).asReference();
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount fooWC=(WithCount)fooRef.getReferredINode();
assertEquals(1,fooWC.getReferenceCount());
INodeDirectory fooDir=fooWC.getReferredINode().asDirectory();
List diffs=fooDir.getDiffs().asList();
assertEquals(1,diffs.size());
assertEquals(s2.getId(),diffs.get(0).getSnapshotId());
restartClusterAndCheckImage(true);
// Deleting s2 releases foo's last snapshot reference; only the two dirs,
// root, and /dir1's s1 bookkeeping remain => namespace usage 4.
hdfs.deleteSnapshot(sdir2,"s2");
assertFalse(hdfs.exists(bar_s2));
restartClusterAndCheckImage(true);
Quota.Counts q=fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(4,q.get(Quota.NAMESPACE));
assertEquals(0,q.get(Quota.DISKSPACE));
// Dropping s1 frees the final snapshot object => usage 3 (root + 2 dirs).
hdfs.deleteSnapshot(sdir1,"s1");
restartClusterAndCheckImage(true);
q=fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(3,q.get(Quota.NAMESPACE));
assertEquals(0,q.get(Quota.DISKSPACE));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test renaming a file and then delete snapshots.
 *
 * /dir2/foo is captured in snapshot s2 (on dir2), while s1/s3 are taken on
 * dir1 before foo arrives. foo is then renamed into /dir1 and its replication
 * changed around snapshots s4/s5. The snapshots are deleted one by one,
 * verifying after each deletion (with fsimage reloads where noted) that the
 * surviving snapshot copies still report the replication recorded at
 * snapshot time.
 */
@Test public void testRenameFileAndDeleteSnapshot() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir2,"foo");
DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED);
// s1/s3 on dir1 (foo not there yet), s2 on dir2 (contains foo)
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
hdfs.createSnapshot(sdir1,"s3");
// move foo from dir2 into dir1, then vary replication across s4/s5
final Path newfoo=new Path(sdir1,"foo");
hdfs.rename(foo,newfoo);
hdfs.setReplication(newfoo,REPL_1);
hdfs.createSnapshot(sdir1,"s4");
hdfs.setReplication(newfoo,REPL_2);
FileStatus status=hdfs.getFileStatus(newfoo);
assertEquals(REPL_2,status.getReplication());
// s4's copy must still show the replication in effect when s4 was taken
final Path foo_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo");
status=hdfs.getFileStatus(foo_s4);
assertEquals(REPL_1,status.getReplication());
hdfs.createSnapshot(sdir1,"s5");
final Path foo_s5=SnapshotTestHelper.getSnapshotPath(sdir1,"s5","foo");
status=hdfs.getFileStatus(foo_s5);
assertEquals(REPL_2,status.getReplication());
// delete s5: its copy disappears while s4's copy is untouched
hdfs.deleteSnapshot(sdir1,"s5");
restartClusterAndCheckImage(true);
assertFalse(hdfs.exists(foo_s5));
status=hdfs.getFileStatus(foo_s4);
assertEquals(REPL_1,status.getReplication());
// delete s4: foo was not under dir1 at s3 time, and dir2 has no s3,
// so neither s3 path resolves
hdfs.deleteSnapshot(sdir1,"s4");
assertFalse(hdfs.exists(foo_s4));
Path foo_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo");
assertFalse(hdfs.exists(foo_s3));
foo_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo");
assertFalse(hdfs.exists(foo_s3));
// s2 (taken on dir2 before the rename) still holds the original copy
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo");
assertTrue(hdfs.exists(foo_s2));
status=hdfs.getFileStatus(foo_s2);
assertEquals(REPL,status.getReplication());
// the file's diff list should have collapsed to the single s2 entry
INodeFile snode=fsdir.getINode(newfoo.toString()).asFile();
assertEquals(1,snode.getDiffs().asList().size());
INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory();
Snapshot s2=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
assertEquals(s2.getId(),snode.getDiffs().getLastSnapshotId());
restartClusterAndCheckImage(true);
// deleting the remaining snapshots must keep the image loadable each time
hdfs.deleteSnapshot(sdir2,"s2");
assertFalse(hdfs.exists(foo_s2));
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1,"s3");
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1,"s1");
restartClusterAndCheckImage(true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test renaming a dir and then delete snapshots.
 *
 * /dir2/foo (containing bar and bar2) is renamed into /dir1 after snapshots
 * s1/s3 (dir1) and s2 (dir2) are taken. Files under the renamed foo are then
 * created and deleted across snapshots s4/s5, and the snapshots are removed
 * one by one while checking which snapshot paths remain resolvable. The
 * fsimage is reloaded after most deletions to verify it stays consistent.
 */
@Test public void testRenameDirAndDeleteSnapshot_1() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir2,"foo");
final Path bar=new Path(foo,"bar");
final Path bar2=new Path(foo,"bar2");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
// s1/s3 on dir1 (before foo arrives), s2 on dir2 (foo with bar, bar2)
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
hdfs.createSnapshot(sdir1,"s3");
final Path newfoo=new Path(sdir1,"foo");
hdfs.rename(foo,newfoo);
final Path newbar=new Path(newfoo,bar.getName());
final Path newbar2=new Path(newfoo,bar2.getName());
final Path newbar3=new Path(newfoo,"bar3");
DFSTestUtil.createFile(hdfs,newbar3,BLOCKSIZE,REPL,SEED);
// s4 captures bar, bar2 and bar3 under dir1/foo; then delete bar and bar3
hdfs.createSnapshot(sdir1,"s4");
hdfs.delete(newbar,true);
hdfs.delete(newbar3,true);
assertFalse(hdfs.exists(newbar3));
assertFalse(hdfs.exists(bar));
final Path bar_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar");
final Path bar3_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar3");
assertTrue(hdfs.exists(bar_s4));
assertTrue(hdfs.exists(bar3_s4));
// s5 captures only bar2; deleting bar2 keeps its s5 copy alive
hdfs.createSnapshot(sdir1,"s5");
hdfs.delete(newbar2,true);
assertFalse(hdfs.exists(bar2));
final Path bar2_s5=SnapshotTestHelper.getSnapshotPath(sdir1,"s5","foo/bar2");
assertTrue(hdfs.exists(bar2_s5));
// delete s5: bar2 is then only reachable through s4
hdfs.deleteSnapshot(sdir1,"s5");
restartClusterAndCheckImage(true);
assertFalse(hdfs.exists(bar2_s5));
final Path bar2_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar2");
assertTrue(hdfs.exists(bar2_s4));
// delete s4: bar is then only reachable through dir2's s2 copy
hdfs.deleteSnapshot(sdir1,"s4");
assertFalse(hdfs.exists(bar_s4));
Path bar_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar");
assertFalse(hdfs.exists(bar_s3));
bar_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar");
assertFalse(hdfs.exists(bar_s3));
final Path bar_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar");
assertTrue(hdfs.exists(bar_s2));
// same checks for bar2 (present in s2) and bar3 (created after s2, so
// absent everywhere except the now-deleted s4)
assertFalse(hdfs.exists(bar2_s4));
Path bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
final Path bar2_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar2");
assertTrue(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar3_s4));
Path bar3_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar3");
assertFalse(hdfs.exists(bar3_s3));
bar3_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar3");
assertFalse(hdfs.exists(bar3_s3));
final Path bar3_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar3");
assertFalse(hdfs.exists(bar3_s2));
restartClusterAndCheckImage(true);
// remove the remaining snapshots, reloading the image after each step
hdfs.deleteSnapshot(sdir2,"s2");
assertFalse(hdfs.exists(bar_s2));
assertFalse(hdfs.exists(bar2_s2));
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1,"s3");
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1,"s1");
restartClusterAndCheckImage(true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Test clear quota of a snapshottable dir or a dir with snapshot.
 *
 * Quota changes on a snapshottable directory must not create snapshot diffs
 * while no snapshot exists; once a snapshot has been taken, a quota change
 * is expected to be recorded as exactly one directory diff.
 */
@Test public void testClearQuota() throws Exception {
final Path dir=new Path("/TestSnapshot");
hdfs.mkdirs(dir);
hdfs.allowSnapshot(dir);
// QUOTA_DONT_SET: no snapshot yet, so no diff may be created
hdfs.setQuota(dir,HdfsConstants.QUOTA_DONT_SET,HdfsConstants.QUOTA_DONT_SET);
INodeDirectory dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
assertEquals(0,dirNode.getDiffs().asList().size());
// an actual quota value, still before any snapshot: no diff either
hdfs.setQuota(dir,HdfsConstants.QUOTA_DONT_SET - 1,HdfsConstants.QUOTA_DONT_SET - 1);
dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
assertEquals(0,dirNode.getDiffs().asList().size());
// clearing the quota (QUOTA_RESET) before a snapshot: still no diff
hdfs.setQuota(dir,HdfsConstants.QUOTA_RESET,HdfsConstants.QUOTA_RESET);
dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
assertEquals(0,dirNode.getDiffs().asList().size());
// after snapshot s1, clearing the quota produces exactly one diff
SnapshotTestHelper.createSnapshot(hdfs,dir,"s1");
hdfs.setQuota(dir,HdfsConstants.QUOTA_RESET,HdfsConstants.QUOTA_RESET);
dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
assertEquals(1,dirNode.getDiffs().asList().size());
SnapshottableDirectoryStatus[] status=hdfs.getSnapshottableDirListing();
assertEquals(1,status.length);
assertEquals(dir,status[0].getFullPath());
// create sub + file after snapshot s2, then clear the quota again; the
// child created after s2 must show up in s2's CREATED children diff
final Path subDir=new Path(dir,"sub");
hdfs.mkdirs(subDir);
hdfs.createSnapshot(dir,"s2");
final Path file=new Path(subDir,"file");
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,seed);
hdfs.setQuota(dir,HdfsConstants.QUOTA_RESET,HdfsConstants.QUOTA_RESET);
INode subNode=fsdir.getINode4Write(subDir.toString());
assertTrue(subNode.asDirectory().isWithSnapshot());
List diffList=subNode.asDirectory().getDiffs().asList();
assertEquals(1,diffList.size());
Snapshot s2=dirNode.getSnapshot(DFSUtil.string2Bytes("s2"));
assertEquals(s2.getId(),diffList.get(0).getSnapshotId());
List createdList=diffList.get(0).getChildrenDiff().getList(ListType.CREATED);
assertEquals(1,createdList.size());
// the CREATED entry must be the very same inode as the live file
assertSame(fsdir.getINode4Write(file.toString()),createdList.get(0));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test(timeout=60000) public void testSetQuota() throws Exception {
// Setting a quota on a non-snapshottable descendant of a snapshottable
// directory must mark the quota as set without turning the descendant
// into a snapshot-tracking directory.
final Path root=new Path("/TestSnapshot");
hdfs.mkdirs(root);
SnapshotTestHelper.createSnapshot(hdfs,root,"s1");
final Path subDir=new Path(root,"sub");
hdfs.mkdirs(subDir);
final Path fileUnderSub=new Path(subDir,"file");
DFSTestUtil.createFile(hdfs,fileUnderSub,BLOCKSIZE,REPLICATION,seed);
// sub was created after s1, so it carries no snapshot feature
INodeDirectory subDirNode=INodeDirectory.valueOf(fsdir.getINode(subDir.toString()),subDir);
assertFalse(subDirNode.isWithSnapshot());
// apply a (nearly unlimited) namespace/diskspace quota on sub
hdfs.setQuota(subDir,Long.MAX_VALUE - 1,Long.MAX_VALUE - 1);
subDirNode=INodeDirectory.valueOf(fsdir.getINode(subDir.toString()),subDir);
assertTrue(subDirNode.isQuotaSet());
assertFalse(subDirNode.isWithSnapshot());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A simple test that updates a sub-directory of a snapshottable directory
 * with snapshots: changing the sub-directory's times after a snapshot is
 * taken must leave the snapshot copy's recorded times unchanged.
 */
@Test(timeout=60000) public void testUpdateDirectory() throws Exception {
final Path parent=new Path("/dir");
final Path child=new Path(parent,"sub");
final Path childFile=new Path(child,"file");
DFSTestUtil.createFile(hdfs,childFile,BLOCKSIZE,REPLICATION,seed);
// remember the times before snapshotting, then mutate them afterwards
final FileStatus statusBefore=hdfs.getFileStatus(child);
hdfs.allowSnapshot(parent);
hdfs.createSnapshot(parent,"s1");
hdfs.setTimes(child,100L,100L);
// the copy inside s1 must still show the pre-snapshot times
final Path childInS1=SnapshotTestHelper.getSnapshotPath(parent,"s1","sub");
final FileStatus statusInS1=hdfs.getFileStatus(childInS1);
assertEquals(statusBefore.getModificationTime(),statusInS1.getModificationTime());
assertEquals(statusBefore.getAccessTime(),statusInS1.getAccessTime());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Make sure we delete 0-sized block when deleting an INodeFileUCWithSnapshot
 */
@Test public void testDeletionWithZeroSizeBlock() throws Exception {
final Path foo=new Path("/foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPLICATION,0L);
SnapshotTestHelper.createSnapshot(hdfs,foo,"s0");
// re-open bar for append; before addBlock it still has one full block
hdfs.append(bar);
INodeFile barNode=fsdir.getINode4Write(bar.toString()).asFile();
BlockInfo[] blks=barNode.getBlocks();
assertEquals(1,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
// allocate an extra block directly through the NN RPC; nothing is
// written to it, so it stays 0-sized
ExtendedBlock previous=new ExtendedBlock(fsn.getBlockPoolId(),blks[0]);
cluster.getNameNodeRpc().addBlock(bar.toString(),hdfs.getClient().getClientName(),previous,null,barNode.getId(),null);
// s1 captures bar while it is under construction with the extra block
SnapshotTestHelper.createSnapshot(hdfs,foo,"s1");
barNode=fsdir.getINode4Write(bar.toString()).asFile();
blks=barNode.getBlocks();
assertEquals(2,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
assertEquals(0,blks[1].getNumBytes());
// delete bar: the snapshot copy must retain only the full first block,
// i.e. the trailing 0-sized block has been removed
hdfs.delete(bar,true);
final Path sbar=SnapshotTestHelper.getSnapshotPath(foo,"s1",bar.getName());
barNode=fsdir.getINode(sbar.toString()).asFile();
blks=barNode.getBlocks();
assertEquals(1,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * 1. rename under-construction file with 0-sized blocks after snapshot.
 * 2. delete the renamed directory.
 * make sure we delete the 0-sized block.
 * see HDFS-5476.
 */
@Test public void testDeletionWithZeroSizeBlock3() throws Exception {
final Path foo=new Path("/foo");
final Path subDir=new Path(foo,"sub");
final Path bar=new Path(subDir,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPLICATION,0L);
// re-open bar for append and allocate an extra, still 0-sized block via
// the NN RPC (nothing is written to it)
hdfs.append(bar);
INodeFile barNode=fsdir.getINode4Write(bar.toString()).asFile();
BlockInfo[] blks=barNode.getBlocks();
assertEquals(1,blks.length);
ExtendedBlock previous=new ExtendedBlock(fsn.getBlockPoolId(),blks[0]);
cluster.getNameNodeRpc().addBlock(bar.toString(),hdfs.getClient().getClientName(),previous,null,barNode.getId(),null);
SnapshotTestHelper.createSnapshot(hdfs,foo,"s1");
// rename the under-construction file; it carries a full block plus the
// trailing 0-sized one
final Path bar2=new Path(subDir,"bar2");
hdfs.rename(bar,bar2);
INodeFile bar2Node=fsdir.getINode4Write(bar2.toString()).asFile();
blks=bar2Node.getBlocks();
assertEquals(2,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
assertEquals(0,blks[1].getNumBytes());
// delete the containing dir: the s1 copy of bar must keep only the full
// block, i.e. the 0-sized block has been deleted (HDFS-5476)
hdfs.delete(subDir,true);
final Path sbar=SnapshotTestHelper.getSnapshotPath(foo,"s1","sub/bar");
barNode=fsdir.getINode(sbar.toString()).asFile();
blks=barNode.getBlocks();
assertEquals(1,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Make sure we delete 0-sized block when deleting an under-construction file
 */
@Test public void testDeletionWithZeroSizeBlock2() throws Exception {
final Path foo=new Path("/foo");
final Path subDir=new Path(foo,"sub");
final Path bar=new Path(subDir,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPLICATION,0L);
// re-open bar for append and allocate an extra, still 0-sized block via
// the NN RPC (nothing is written to it)
hdfs.append(bar);
INodeFile barNode=fsdir.getINode4Write(bar.toString()).asFile();
BlockInfo[] blks=barNode.getBlocks();
assertEquals(1,blks.length);
ExtendedBlock previous=new ExtendedBlock(fsn.getBlockPoolId(),blks[0]);
cluster.getNameNodeRpc().addBlock(bar.toString(),hdfs.getClient().getClientName(),previous,null,barNode.getId(),null);
// s1 captures the under-construction file with the two blocks
SnapshotTestHelper.createSnapshot(hdfs,foo,"s1");
barNode=fsdir.getINode4Write(bar.toString()).asFile();
blks=barNode.getBlocks();
assertEquals(2,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
assertEquals(0,blks[1].getNumBytes());
// delete the containing dir: the snapshot copy of bar must keep only the
// full first block, i.e. the trailing 0-sized block is gone
hdfs.delete(subDir,true);
final Path sbar=SnapshotTestHelper.getSnapshotPath(foo,"s1","sub/bar");
barNode=fsdir.getINode(sbar.toString()).asFile();
blks=barNode.getBlocks();
assertEquals(1,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier NullVerifier HybridVerifier
/**
 * Test deleting a file with snapshots. Need to check the blocksMap to make
 * sure the corresponding record is updated correctly.
 */
@Test(timeout=60000) public void testDeletionWithSnapshots() throws Exception {
Path file0=new Path(sub1,"file0");
Path file1=new Path(sub1,"file1");
Path sub2=new Path(sub1,"sub2");
Path file2=new Path(sub2,"file2");
Path file3=new Path(sub1,"file3");
Path file4=new Path(sub1,"file4");
Path file5=new Path(sub1,"file5");
DFSTestUtil.createFile(hdfs,file0,4 * BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,file1,2 * BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,file2,3 * BLOCKSIZE,REPLICATION,seed);
// Without any snapshot, deleting sub2 must purge file2's blocks from the
// blocksMap entirely.
{
final INodeFile f2=assertBlockCollection(file2.toString(),3,fsdir,blockmanager);
BlockInfo[] blocks=f2.getBlocks();
hdfs.delete(sub2,true);
for ( BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
}
// Take three snapshots, creating one more file before each.
final String[] snapshots={"s0","s1","s2"};
DFSTestUtil.createFile(hdfs,file3,5 * BLOCKSIZE,REPLICATION,seed);
SnapshotTestHelper.createSnapshot(hdfs,sub1,snapshots[0]);
DFSTestUtil.createFile(hdfs,file4,1 * BLOCKSIZE,REPLICATION,seed);
SnapshotTestHelper.createSnapshot(hdfs,sub1,snapshots[1]);
DFSTestUtil.createFile(hdfs,file5,7 * BLOCKSIZE,REPLICATION,seed);
SnapshotTestHelper.createSnapshot(hdfs,sub1,snapshots[2]);
// A metadata-only change (setReplication) turns file1 into a
// file-with-snapshot but must keep its class and block collection intact.
{
INodeFile f1=assertBlockCollection(file1.toString(),2,fsdir,blockmanager);
Assert.assertSame(INodeFile.class,f1.getClass());
hdfs.setReplication(file1,(short)2);
f1=assertBlockCollection(file1.toString(),2,fsdir,blockmanager);
assertTrue(f1.isWithSnapshot());
assertFalse(f1.isUnderConstruction());
}
// Deleting file0 must NOT free its blocks: snapshots s0 and s1 were taken
// while file0 existed, so the blocksMap entries stay live.
final INodeFile f0=assertBlockCollection(file0.toString(),4,fsdir,blockmanager);
BlockInfo[] blocks0=f0.getBlocks();
Path snapshotFile0=SnapshotTestHelper.getSnapshotPath(sub1,"s0",file0.getName());
assertBlockCollection(snapshotFile0.toString(),4,fsdir,blockmanager);
hdfs.delete(file0,true);
for ( BlockInfo b : blocks0) {
assertNotNull(blockmanager.getBlockCollection(b));
}
assertBlockCollection(snapshotFile0.toString(),4,fsdir,blockmanager);
String s1f0=SnapshotTestHelper.getSnapshotPath(sub1,"s1",file0.getName()).toString();
assertBlockCollection(s1f0,4,fsdir,blockmanager);
// Deleting snapshot s1 still leaves s0 referencing file0's blocks.
hdfs.deleteSnapshot(sub1,"s1");
for ( BlockInfo b : blocks0) {
assertNotNull(blockmanager.getBlockCollection(b));
}
assertBlockCollection(snapshotFile0.toString(),4,fsdir,blockmanager);
// Resolving a path inside the deleted snapshot s1 must fail.
try {
INodeFile.valueOf(fsdir.getINode(s1f0),s1f0);
fail("Expect FileNotFoundException when identifying the INode in a deleted Snapshot");
}
catch ( IOException e) {
assertExceptionContains("File does not exist: " + s1f0,e);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * When combine two snapshots, make sure files/directories created after the
 * prior snapshot get destroyed.
 */
@Test(timeout=300000) public void testCombineSnapshotDiff3() throws Exception {
Path dir=new Path("/dir");
Path subDir1=new Path(dir,"subdir1");
Path subDir2=new Path(dir,"subdir2");
hdfs.mkdirs(subDir2);
Path subsubDir=new Path(subDir1,"subsubdir");
hdfs.mkdirs(subsubDir);
// s1 is taken before any of the "new" files below exist
SnapshotTestHelper.createSnapshot(hdfs,dir,"s1");
Path newDir=new Path(subsubDir,"newdir");
Path newFile=new Path(newDir,"newfile");
DFSTestUtil.createFile(hdfs,newFile,BLOCKSIZE,REPLICATION,seed);
Path newFile2=new Path(subDir2,"newfile");
DFSTestUtil.createFile(hdfs,newFile2,BLOCKSIZE,REPLICATION,seed);
// s2 captures both new files; verify namespace/diskspace quota usage
SnapshotTestHelper.createSnapshot(hdfs,dir,"s2");
checkQuotaUsageComputation(dir,11,BLOCKSIZE * 2 * REPLICATION);
// delete the directories holding the new files; they survive only in s2
hdfs.delete(subsubDir,true);
hdfs.delete(subDir2,true);
checkQuotaUsageComputation(dir,14,BLOCKSIZE * 2 * REPLICATION);
// deleting s2 combines its diff into s1; everything created after s1
// must be destroyed, dropping diskspace usage to 0
hdfs.deleteSnapshot(dir,"s2");
checkQuotaUsageComputation(dir,8,0);
// subdir1 is still visible through s1, while the s2 path is gone
Path subdir1_s1=SnapshotTestHelper.getSnapshotPath(dir,"s1",subDir1.getName());
Path subdir1_s2=SnapshotTestHelper.getSnapshotPath(dir,"s2",subDir1.getName());
assertTrue(hdfs.exists(subdir1_s1));
assertFalse(hdfs.exists(subdir1_s2));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Test deleting the earliest (first) snapshot. In this simplest scenario, the
 * snapshots are taken on the same directory, and we do not need to combine
 * snapshot diffs.
 */
@Test(timeout=300000) public void testDeleteEarliestSnapshot1() throws Exception {
Path file0=new Path(sub,"file0");
Path file1=new Path(sub,"file1");
DFSTestUtil.createFile(hdfs,file0,BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPLICATION,seed);
String snapshotName="s1";
// deleting a snapshot on a non-snapshottable directory must fail
try {
hdfs.deleteSnapshot(sub,snapshotName);
fail("SnapshotException expected: " + sub.toString() + " is not snapshottable yet");
}
catch ( Exception e) {
GenericTestUtils.assertExceptionContains("Directory is not a snapshottable directory: " + sub,e);
}
hdfs.allowSnapshot(sub);
// deleting a snapshot that does not exist must also fail
try {
hdfs.deleteSnapshot(sub,snapshotName);
fail("SnapshotException expected: snapshot " + snapshotName + " does not exist for "+ sub.toString());
}
catch ( Exception e) {
GenericTestUtils.assertExceptionContains("Cannot delete snapshot " + snapshotName + " from path "+ sub.toString()+ ": the snapshot does not exist.",e);
}
// create/delete/recreate s1, verifying quota usage after every step
SnapshotTestHelper.createSnapshot(hdfs,sub,snapshotName);
checkQuotaUsageComputation(sub,4,BLOCKSIZE * REPLICATION * 2);
hdfs.deleteSnapshot(sub,snapshotName);
checkQuotaUsageComputation(sub,3,BLOCKSIZE * REPLICATION * 2);
hdfs.createSnapshot(sub,snapshotName);
checkQuotaUsageComputation(sub,4,BLOCKSIZE * REPLICATION * 2);
// add a file and take a second snapshot s2 that contains it
Path newFile=new Path(sub,"newFile");
DFSTestUtil.createFile(hdfs,newFile,BLOCKSIZE,REPLICATION,seed);
String snapshotName2="s2";
hdfs.createSnapshot(sub,snapshotName2);
checkQuotaUsageComputation(sub,6,BLOCKSIZE * REPLICATION * 3);
// deleting the earliest snapshot (s1) must leave the file's status as
// seen through s2 completely unchanged
Path ss=SnapshotTestHelper.getSnapshotPath(sub,snapshotName2,"newFile");
FileStatus statusBeforeDeletion=hdfs.getFileStatus(ss);
hdfs.deleteSnapshot(sub,snapshotName);
checkQuotaUsageComputation(sub,5,BLOCKSIZE * REPLICATION * 3);
FileStatus statusAfterDeletion=hdfs.getFileStatus(ss);
System.out.println("Before deletion: " + statusBeforeDeletion.toString() + "\n"+ "After deletion: "+ statusAfterDeletion.toString());
assertEquals(statusBeforeDeletion.toString(),statusAfterDeletion.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test deleting a directory which is a descendant of a snapshottable
 * directory. In the test we need to cover the following cases:
 *
 * 1. Delete current INodeFile/INodeDirectory without taking any snapshot.
 * 2. Delete current INodeFile/INodeDirectory while snapshots have been taken
 * on ancestor(s).
 * 3. Delete current INodeFileWithSnapshot.
 * 4. Delete current INodeDirectoryWithSnapshot.
 *
 */
@Test(timeout=300000) public void testDeleteCurrentFileDirectory() throws Exception {
// set up files under subsub and under noChangeDirParent/noChangeDir
Path deleteDir=new Path(subsub,"deleteDir");
Path deleteFile=new Path(deleteDir,"deleteFile");
Path noChangeDirParent=new Path(sub,"noChangeDirParent");
Path noChangeDir=new Path(noChangeDirParent,"noChangeDir");
Path noChangeFile=new Path(noChangeDir,"noChangeFile");
DFSTestUtil.createFile(hdfs,deleteFile,BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,noChangeFile,BLOCKSIZE,REPLICATION,seed);
Path metaChangeFile1=new Path(subsub,"metaChangeFile1");
DFSTestUtil.createFile(hdfs,metaChangeFile1,BLOCKSIZE,REPLICATION,seed);
Path metaChangeFile2=new Path(noChangeDir,"metaChangeFile2");
DFSTestUtil.createFile(hdfs,metaChangeFile2,BLOCKSIZE,REPLICATION,seed);
// case 1: delete deleteDir before any snapshot is taken
hdfs.delete(deleteDir,true);
SnapshotTestHelper.createSnapshot(hdfs,dir,"s0");
// case 2: delete a dir created after s0; no snapshot refers to its file,
// so the file's blocks must be removed from the blocksMap
Path tempDir=new Path(dir,"tempdir");
Path tempFile=new Path(tempDir,"tempfile");
DFSTestUtil.createFile(hdfs,tempFile,BLOCKSIZE,REPLICATION,seed);
final INodeFile temp=TestSnapshotBlocksMap.assertBlockCollection(tempFile.toString(),1,fsdir,blockmanager);
BlockInfo[] blocks=temp.getBlocks();
hdfs.delete(tempDir,true);
checkQuotaUsageComputation(dir,9L,BLOCKSIZE * REPLICATION * 3);
for ( BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
// prepare cases 3/4: a new file plus replication changes, then take s1
Path newFileAfterS0=new Path(subsub,"newFile");
DFSTestUtil.createFile(hdfs,newFileAfterS0,BLOCKSIZE,REPLICATION,seed);
hdfs.setReplication(metaChangeFile1,REPLICATION_1);
hdfs.setReplication(metaChangeFile2,REPLICATION_1);
SnapshotTestHelper.createSnapshot(hdfs,dir,"s1");
checkQuotaUsageComputation(dir,14L,BLOCKSIZE * REPLICATION * 4);
Snapshot snapshot0=fsdir.getINode(dir.toString()).asDirectory().getSnapshot(DFSUtil.string2Bytes("s0"));
Snapshot snapshot1=fsdir.getINode(dir.toString()).asDirectory().getSnapshot(DFSUtil.string2Bytes("s1"));
// delete noChangeDirParent, then inspect the copies preserved under s1
hdfs.delete(noChangeDirParent,true);
checkQuotaUsageComputation(dir,17L,BLOCKSIZE * REPLICATION * 4);
Path snapshotNoChangeDir=SnapshotTestHelper.getSnapshotPath(dir,"s1",sub.getName() + "/" + noChangeDirParent.getName()+ "/"+ noChangeDir.getName());
INodeDirectory snapshotNode=(INodeDirectory)fsdir.getINode(snapshotNoChangeDir.toString());
// the never-modified dir is stored as a plain INodeDirectory
assertEquals(INodeDirectory.class,snapshotNode.getClass());
ReadOnlyList children=snapshotNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(2,children.size());
INode noChangeFileSCopy=children.get(1);
assertEquals(noChangeFile.getName(),noChangeFileSCopy.getLocalName());
assertEquals(INodeFile.class,noChangeFileSCopy.getClass());
TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir,noChangeFileSCopy.getLocalName()).toString(),1,fsdir,blockmanager);
// metaChangeFile2 had its replication changed, so its copy keeps
// per-snapshot replication values (case 3)
INodeFile metaChangeFile2SCopy=children.get(0).asFile();
assertEquals(metaChangeFile2.getName(),metaChangeFile2SCopy.getLocalName());
assertTrue(metaChangeFile2SCopy.isWithSnapshot());
assertFalse(metaChangeFile2SCopy.isUnderConstruction());
TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir,metaChangeFile2SCopy.getLocalName()).toString(),1,fsdir,blockmanager);
assertEquals(REPLICATION_1,metaChangeFile2SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
assertEquals(REPLICATION_1,metaChangeFile2SCopy.getFileReplication(snapshot1.getId()));
assertEquals(REPLICATION,metaChangeFile2SCopy.getFileReplication(snapshot0.getId()));
// case 4: delete the whole sub tree; newFile was created after s1, so its
// blocks must be removed from the blocksMap
Path newFile=new Path(sub,"newFile");
DFSTestUtil.createFile(hdfs,newFile,BLOCKSIZE,REPLICATION,seed);
final INodeFile newFileNode=TestSnapshotBlocksMap.assertBlockCollection(newFile.toString(),1,fsdir,blockmanager);
blocks=newFileNode.getBlocks();
checkQuotaUsageComputation(dir,18L,BLOCKSIZE * REPLICATION * 5);
hdfs.delete(sub,true);
checkQuotaUsageComputation(dir,19L,BLOCKSIZE * REPLICATION * 4);
for ( BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
// sub is still reachable through s1; check its children lists for the
// current state and for each snapshot id
Path snapshotSub=SnapshotTestHelper.getSnapshotPath(dir,"s1",sub.getName());
INodeDirectory snapshotNode4Sub=fsdir.getINode(snapshotSub.toString()).asDirectory();
assertTrue(snapshotNode4Sub.isWithSnapshot());
assertEquals(1,snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID).size());
assertEquals(2,snapshotNode4Sub.getChildrenList(snapshot1.getId()).size());
INode snapshotNode4Subsub=snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID).get(0);
assertTrue(snapshotNode4Subsub.asDirectory().isWithSnapshot());
assertTrue(snapshotNode4Sub == snapshotNode4Subsub.getParent());
INodeDirectory snapshotSubsubDir=(INodeDirectory)snapshotNode4Subsub;
children=snapshotSubsubDir.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(2,children.size());
assertEquals(children.get(0).getLocalName(),metaChangeFile1.getName());
assertEquals(children.get(1).getLocalName(),newFileAfterS0.getName());
// at s0 time subsub contained only metaChangeFile1
children=snapshotSubsubDir.getChildrenList(snapshot0.getId());
assertEquals(1,children.size());
INode child=children.get(0);
assertEquals(child.getLocalName(),metaChangeFile1.getName());
// metaChangeFile1's copy also keeps per-snapshot replication values
INodeFile metaChangeFile1SCopy=child.asFile();
assertTrue(metaChangeFile1SCopy.isWithSnapshot());
assertFalse(metaChangeFile1SCopy.isUnderConstruction());
assertEquals(REPLICATION_1,metaChangeFile1SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
assertEquals(REPLICATION_1,metaChangeFile1SCopy.getFileReplication(snapshot1.getId()));
assertEquals(REPLICATION,metaChangeFile1SCopy.getFileReplication(snapshot0.getId()));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * A test covering the case where the snapshot diff to be deleted is renamed
 * to its previous snapshot.
 */
@Test(timeout=300000) public void testRenameSnapshotDiff() throws Exception {
// nested snapshots are needed: both sub and its ancestor dir get snapshots
cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
final Path subFile0=new Path(sub,"file0");
final Path subsubFile0=new Path(subsub,"file0");
DFSTestUtil.createFile(hdfs,subFile0,BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,subsubFile0,BLOCKSIZE,REPLICATION,seed);
hdfs.setOwner(subsub,"owner","group");
// s0 on sub records owner/group "owner"/"group" for subsub
SnapshotTestHelper.createSnapshot(hdfs,sub,"s0");
checkQuotaUsageComputation(sub,5,BLOCKSIZE * 6);
final Path subFile1=new Path(sub,"file1");
final Path subsubFile1=new Path(subsub,"file1");
DFSTestUtil.createFile(hdfs,subFile1,BLOCKSIZE,REPLICATION_1,seed);
DFSTestUtil.createFile(hdfs,subsubFile1,BLOCKSIZE,REPLICATION,seed);
checkQuotaUsageComputation(sub,8,BLOCKSIZE * 11);
// s1 on sub, then s2 on the ancestor dir
SnapshotTestHelper.createSnapshot(hdfs,sub,"s1");
checkQuotaUsageComputation(sub,9,BLOCKSIZE * 11);
SnapshotTestHelper.createSnapshot(hdfs,dir,"s2");
checkQuotaUsageComputation(dir,11,BLOCKSIZE * 11);
checkQuotaUsageComputation(sub,9,BLOCKSIZE * 11);
// mutate metadata and delete subFile1 after s2 was taken
hdfs.setOwner(subsub,"unknown","unknown");
hdfs.setReplication(subsubFile1,REPLICATION_1);
checkQuotaUsageComputation(dir,13,BLOCKSIZE * 11);
checkQuotaUsageComputation(sub,11,BLOCKSIZE * 11);
hdfs.delete(subFile1,true);
checkQuotaUsageComputation(new Path("/"),16,BLOCKSIZE * 11);
checkQuotaUsageComputation(dir,15,BLOCKSIZE * 11);
checkQuotaUsageComputation(sub,13,BLOCKSIZE * 11);
// the s2 copies must still show the pre-change metadata
Path subsubSnapshotCopy=SnapshotTestHelper.getSnapshotPath(dir,"s2",sub.getName() + Path.SEPARATOR + subsub.getName());
Path subsubFile1SCopy=SnapshotTestHelper.getSnapshotPath(dir,"s2",sub.getName() + Path.SEPARATOR + subsub.getName()+ Path.SEPARATOR+ subsubFile1.getName());
Path subFile1SCopy=SnapshotTestHelper.getSnapshotPath(dir,"s2",sub.getName() + Path.SEPARATOR + subFile1.getName());
FileStatus subsubStatus=hdfs.getFileStatus(subsubSnapshotCopy);
assertEquals("owner",subsubStatus.getOwner());
assertEquals("group",subsubStatus.getGroup());
FileStatus subsubFile1Status=hdfs.getFileStatus(subsubFile1SCopy);
assertEquals(REPLICATION,subsubFile1Status.getReplication());
FileStatus subFile1Status=hdfs.getFileStatus(subFile1SCopy);
assertEquals(REPLICATION_1,subFile1Status.getReplication());
// delete s2: the s2 paths must stop resolving ...
hdfs.deleteSnapshot(dir,"s2");
checkQuotaUsageComputation(new Path("/"),14,BLOCKSIZE * 11);
checkQuotaUsageComputation(dir,13,BLOCKSIZE * 11);
checkQuotaUsageComputation(sub,12,BLOCKSIZE * 11);
try {
hdfs.getFileStatus(subsubSnapshotCopy);
fail("should throw FileNotFoundException");
}
catch ( FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("File does not exist: " + subsubSnapshotCopy.toString(),e);
}
try {
hdfs.getFileStatus(subsubFile1SCopy);
fail("should throw FileNotFoundException");
}
catch ( FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("File does not exist: " + subsubFile1SCopy.toString(),e);
}
try {
hdfs.getFileStatus(subFile1SCopy);
fail("should throw FileNotFoundException");
}
catch ( FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("File does not exist: " + subFile1SCopy.toString(),e);
}
// ... while the same content remains reachable through s1 on sub, with
// the original metadata intact
subsubSnapshotCopy=SnapshotTestHelper.getSnapshotPath(sub,"s1",subsub.getName());
subsubFile1SCopy=SnapshotTestHelper.getSnapshotPath(sub,"s1",subsub.getName() + Path.SEPARATOR + subsubFile1.getName());
subFile1SCopy=SnapshotTestHelper.getSnapshotPath(sub,"s1",subFile1.getName());
subsubStatus=hdfs.getFileStatus(subsubSnapshotCopy);
assertEquals("owner",subsubStatus.getOwner());
assertEquals("group",subsubStatus.getGroup());
subsubFile1Status=hdfs.getFileStatus(subsubFile1SCopy);
assertEquals(REPLICATION,subsubFile1Status.getReplication());
subFile1Status=hdfs.getFileStatus(subFile1SCopy);
assertEquals(REPLICATION_1,subFile1Status.getReplication());
}
APIUtilityVerifier InternalCallVerifier ConditionMatcher PublicFieldVerifier
/**
 * Test that we cannot read a file beyond its snapshot length
 * when accessing it via a snapshot path.
 *
 * file1 is written to one block, snapshotted, then appended with a second
 * block. Reading through the live path returns two blocks of data, while
 * both length and positioned read through the snapshot path are capped at
 * the one-block snapshot length. Streams are opened with try-with-resources
 * so they are closed even when an assertion fails (the original leaked the
 * stream on any assertion failure between open() and close()).
 */
@Test(timeout=300000) public void testSnapshotfileLength() throws Exception {
hdfs.mkdirs(sub);
byte[] buffer=new byte[BLOCKSIZE * 8];
Path file1=new Path(sub,file1Name);
DFSTestUtil.createFile(hdfs,file1,0,REPLICATION,SEED);
DFSTestUtil.appendFile(hdfs,file1,BLOCKSIZE);
hdfs.allowSnapshot(sub);
hdfs.createSnapshot(sub,snapshot1);
// grow the file past the length recorded in snapshot1
DFSTestUtil.appendFile(hdfs,file1,BLOCKSIZE);
FileStatus fileStatus=hdfs.getFileStatus(file1);
assertThat(fileStatus.getLen(),is((long)BLOCKSIZE * 2));
// live path: the full post-append content is readable
try (FSDataInputStream fis=hdfs.open(file1)) {
int bytesRead=fis.read(0,buffer,0,buffer.length);
assertThat(bytesRead,is(BLOCKSIZE * 2));
}
// snapshot path: length and readable bytes are capped at snapshot time
Path file1snap1=SnapshotTestHelper.getSnapshotPath(sub,snapshot1,file1Name);
try (FSDataInputStream fis=hdfs.open(file1snap1)) {
FileStatus snapStatus=hdfs.getFileStatus(file1snap1);
assertThat(snapStatus.getLen(),is((long)BLOCKSIZE));
int bytesRead=fis.read(0,buffer,0,buffer.length);
assertThat(bytesRead,is(BLOCKSIZE));
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier
/**
 * Adding as part of jira HDFS-5343
 * Test for checking the cat command on snapshot path it
 * cannot read a file beyond snapshot file length
 *
 * Fixes over the previous version: the snapshot-length assertion now puts
 * the expected value first (it was previously swapped, producing a
 * misleading failure message), and the input streams are opened with
 * try-with-resources so they cannot leak when an assertion fails.
 * @throws Exception
 */
@Test(timeout=600000) public void testSnapshotFileLengthWithCatCommand() throws Exception {
byte[] buffer=new byte[BLOCKSIZE * 8];
hdfs.mkdirs(sub);
Path file1=new Path(sub,file1Name);
DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPLICATION,SEED);
hdfs.allowSnapshot(sub);
hdfs.createSnapshot(sub,snapshot1);
// append a second block after snapshot1 is taken
DFSTestUtil.appendFile(hdfs,file1,BLOCKSIZE);
FileStatus fileStatus=hdfs.getFileStatus(file1);
assertEquals("Unexpected file length",BLOCKSIZE * 2,fileStatus.getLen());
// live path: both blocks are readable
try (FSDataInputStream fis=hdfs.open(file1)) {
int bytesRead=fis.read(buffer,0,buffer.length);
assertEquals("Unexpected # bytes read",BLOCKSIZE * 2,bytesRead);
}
// snapshot path: length and readable bytes are capped at one block
Path file1snap1=SnapshotTestHelper.getSnapshotPath(sub,snapshot1,file1Name);
try (FSDataInputStream fis=hdfs.open(file1snap1)) {
FileStatus snapStatus=hdfs.getFileStatus(file1snap1);
assertEquals("Unexpected snapshot file length",BLOCKSIZE,snapStatus.getLen());
int bytesRead=fis.read(buffer,0,buffer.length);
assertEquals("Unexpected # bytes read",BLOCKSIZE,bytesRead);
}
// "-cat" on the snapshot path must also stop at the snapshot length;
// capture stdout/stderr and restore them even if the assertion fails
PrintStream outBackup=System.out;
PrintStream errBackup=System.err;
ByteArrayOutputStream bao=new ByteArrayOutputStream();
System.setOut(new PrintStream(bao));
System.setErr(new PrintStream(bao));
FsShell shell=new FsShell();
try {
ToolRunner.run(conf,shell,new String[]{"-cat","/TestSnapshotFileLength/sub1/.snapshot/snapshot1/file1"});
assertEquals("Unexpected # bytes from -cat",BLOCKSIZE,bao.size());
}
finally {
System.setOut(outBackup);
System.setErr(errBackup);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test FileStatus of snapshot file before/after rename.
 *
 * Renaming snapshot s1 to s2 must invalidate the old .snapshot/s1 path,
 * expose the same file under .snapshot/s2, and preserve every FileStatus
 * attribute except the path itself.
 */
@Test(timeout=60000) public void testSnapshotRename() throws Exception {
DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPLICATION,seed);
// take snapshot s1 and remember the file's status inside it
Path s1Root=SnapshotTestHelper.createSnapshot(hdfs,sub1,"s1");
Path fileInS1=new Path(s1Root,file1.getName());
assertTrue(hdfs.exists(fileInS1));
final FileStatus beforeRename=hdfs.getFileStatus(fileInS1);
// rename the snapshot: the s1 path disappears, the s2 path appears
hdfs.renameSnapshot(sub1,"s1","s2");
assertFalse(hdfs.exists(fileInS1));
Path s2Root=SnapshotTestHelper.getSnapshotRoot(sub1,"s2");
Path fileInS2=new Path(s2Root,file1.getName());
assertTrue(hdfs.exists(fileInS2));
final FileStatus afterRename=hdfs.getFileStatus(fileInS2);
// the two statuses differ only by path: once the path is patched over,
// their string forms must match exactly
assertFalse(beforeRename.equals(afterRename));
beforeRename.setPath(afterRename.getPath());
assertEquals(beforeRename.toString(),afterRename.toString());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Test replication number calculation for a file with snapshots.
 */
@Test(timeout=60000) public void testReplicationWithSnapshot() throws Exception {
short fileRep=1;
DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,fileRep,seed);
// Maps each snapshot path of file1 to the replication it was taken at.
Map repBySnapshot=new HashMap();
while (fileRep < NUMDATANODE) {
// Take snapshot "s<rep>", then bump the live file's replication and
// verify both the live file and all prior snapshots.
Path root=SnapshotTestHelper.createSnapshot(hdfs,sub1,"s" + fileRep);
Path ssFile=new Path(root,file1.getName());
assertEquals(fileRep,getINodeFile(ssFile).getFileReplication());
repBySnapshot.put(ssFile,fileRep);
hdfs.setReplication(file1,++fileRep);
checkFileReplication(file1,fileRep,fileRep);
checkSnapshotFileReplication(file1,repBySnapshot,fileRep);
}
// Drop back to the default REPLICATION; block replication stays at the
// maximum ever seen (NUMDATANODE - 1) because snapshots retain it.
hdfs.setReplication(file1,REPLICATION);
checkFileReplication(file1,REPLICATION,(short)(NUMDATANODE - 1));
checkSnapshotFileReplication(file1,repBySnapshot,(short)(NUMDATANODE - 1));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test replication for a file with snapshots, also including the scenario
 * where the original file is deleted
 */
@Test(timeout=60000) public void testReplicationAfterDeletion() throws Exception {
DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPLICATION,seed);
// Records the replication factor expected for each snapshot copy of file1.
Map snapshotRepMap=new HashMap();
// Take three snapshots of the same file, all at REPLICATION.
for (int i=1; i <= 3; i++) {
Path root=SnapshotTestHelper.createSnapshot(hdfs,sub1,"s" + i);
Path ssFile=new Path(root,file1.getName());
snapshotRepMap.put(ssFile,REPLICATION);
}
checkFileReplication(file1,REPLICATION,REPLICATION);
checkSnapshotFileReplication(file1,snapshotRepMap,REPLICATION);
// Delete the live file; the snapshot copies must still report the
// original replication, both at the block level and the file level.
hdfs.delete(file1,true);
for ( Path ss : snapshotRepMap.keySet()) {
final INodeFile ssInode=getINodeFile(ss);
assertEquals(REPLICATION,ssInode.getBlockReplication());
assertEquals(snapshotRepMap.get(ss).shortValue(),ssInode.getFileReplication());
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test getting SnapshotStatsMXBean information
 */
@Test public void testSnapshotStatsMXBeanInfo() throws Exception {
Configuration conf=new Configuration();
MiniDFSCluster cluster=null;
String pathName="/snapshot";
Path path=new Path(pathName);
try {
cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
SnapshotManager sm=cluster.getNamesystem().getSnapshotManager();
DistributedFileSystem dfs=(DistributedFileSystem)cluster.getFileSystem();
// Create one snapshottable directory with one snapshot.
dfs.mkdirs(path);
dfs.allowSnapshot(path);
dfs.createSnapshot(path);
// Query the NameNode's SnapshotInfo MXBean via JMX and cross-check the
// reported counts against the SnapshotManager's own counters.
MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=SnapshotInfo");
CompositeData[] directories=(CompositeData[])mbs.getAttribute(mxbeanName,"SnapshottableDirectories");
int numDirectories=Array.getLength(directories);
assertEquals(sm.getNumSnapshottableDirs(),numDirectories);
CompositeData[] snapshots=(CompositeData[])mbs.getAttribute(mxbeanName,"Snapshots");
int numSnapshots=Array.getLength(snapshots);
assertEquals(sm.getNumSnapshots(),numSnapshots);
// The single directory/snapshot entries must both reference pathName.
CompositeData d=(CompositeData)Array.get(directories,0);
CompositeData s=(CompositeData)Array.get(snapshots,0);
assertTrue(((String)d.get("path")).contains(pathName));
assertTrue(((String)s.get("snapshotDirectory")).contains(pathName));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test the listing with different user names to make sure only directories
 * that are owned by the user are listed.
 */
@Test(timeout=60000) public void testListWithDifferentUser() throws Exception {
cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
// Two snapshottable dirs owned by the test's default (super) user.
hdfs.allowSnapshot(dir1);
hdfs.allowSnapshot(dir2);
// Open up the root so the non-privileged users below can mkdir under it.
hdfs.setPermission(root,FsPermission.valueOf("-rwxrwxrwx"));
// user1 owns two snapshottable directories.
UserGroupInformation ugi1=UserGroupInformation.createUserForTesting("user1",new String[]{"group1"});
DistributedFileSystem fs1=(DistributedFileSystem)DFSTestUtil.getFileSystemAs(ugi1,conf);
Path dir1_user1=new Path("/dir1_user1");
Path dir2_user1=new Path("/dir2_user1");
fs1.mkdirs(dir1_user1);
fs1.mkdirs(dir2_user1);
hdfs.allowSnapshot(dir1_user1);
hdfs.allowSnapshot(dir2_user1);
// user2 owns a nested pair (parent + subdir), both snapshottable.
UserGroupInformation ugi2=UserGroupInformation.createUserForTesting("user2",new String[]{"group2"});
DistributedFileSystem fs2=(DistributedFileSystem)DFSTestUtil.getFileSystemAs(ugi2,conf);
Path dir_user2=new Path("/dir_user2");
Path subdir_user2=new Path(dir_user2,"subdir");
fs2.mkdirs(dir_user2);
fs2.mkdirs(subdir_user2);
hdfs.allowSnapshot(dir_user2);
hdfs.allowSnapshot(subdir_user2);
// A superuser sees all 6 snapshottable directories.
String supergroup=conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY,DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
UserGroupInformation superUgi=UserGroupInformation.createUserForTesting("superuser",new String[]{supergroup});
DistributedFileSystem fs3=(DistributedFileSystem)DFSTestUtil.getFileSystemAs(superUgi,conf);
SnapshottableDirectoryStatus[] dirs=fs3.getSnapshottableDirListing();
assertEquals(6,dirs.length);
// Each regular user sees only the 2 directories they own.
dirs=fs1.getSnapshottableDirListing();
assertEquals(2,dirs.length);
assertEquals(dir1_user1,dirs[0].getFullPath());
assertEquals(dir2_user1,dirs[1].getFullPath());
dirs=fs2.getSnapshottableDirListing();
assertEquals(2,dirs.length);
assertEquals(dir_user2,dirs[0].getFullPath());
assertEquals(subdir_user2,dirs[1].getFullPath());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that users can copy a snapshot while preserving its xattrs.
 */
@Test(timeout=120000) public void testCopySnapshotShouldPreserveXAttrs() throws Exception {
FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0700));
// Set two xattrs on the directory, then snapshot it.
hdfs.setXAttr(path,name1,value1);
hdfs.setXAttr(path,name2,value2);
SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName);
Path snapshotCopy=new Path(path.toString() + "-copy");
// "cp -px" copies from the snapshot path and preserves xattrs.
String[] argv=new String[]{"-cp","-px",snapshotPath.toUri().toString(),snapshotCopy.toUri().toString()};
int ret=ToolRunner.run(new FsShell(conf),argv);
assertEquals("cp -px is not working on a snapshot",SUCCESS,ret);
// Both xattr values must survive on the copy.
Map xattrs=hdfs.getXAttrs(snapshotCopy);
assertArrayEquals(value1,xattrs.get(name1));
assertArrayEquals(value2,xattrs.get(name2));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Hammers StartupProgress from 100 concurrent threads to verify it is
// thread-safe: threads cycle through 4 (phase, step, file, size, total)
// combinations, so 25 threads hit each combination, each incrementing the
// counter by 100 -> every counter must end at exactly 2500.
@Test(timeout=10000) public void testThreadSafety() throws Exception {
int numThreads=100;
Phase[] phases={LOADING_FSIMAGE,LOADING_FSIMAGE,LOADING_EDITS,LOADING_EDITS};
Step[] steps=new Step[]{new Step(INODES),new Step(DELEGATION_KEYS),new Step(INODES),new Step(DELEGATION_KEYS)};
String[] files={"file1","file1","file2","file2"};
long[] sizes={1000L,1000L,2000L,2000L};
long[] totals={10000L,20000L,30000L,40000L};
ExecutorService exec=Executors.newFixedThreadPool(numThreads);
try {
for (int i=0; i < numThreads; ++i) {
// Thread i works on combination i % 4.
final Phase phase=phases[i % phases.length];
final Step step=steps[i % steps.length];
final String file=files[i % files.length];
final long size=sizes[i % sizes.length];
final long total=totals[i % totals.length];
exec.submit(new Callable(){
@Override public Void call(){
startupProgress.beginPhase(phase);
startupProgress.setFile(phase,file);
startupProgress.setSize(phase,size);
startupProgress.setTotal(phase,step,total);
incrementCounter(startupProgress,phase,step,100L);
startupProgress.endStep(phase,step);
startupProgress.endPhase(phase);
return null;
}
}
);
}
}
finally {
// Wait for all workers; failing to finish within 10s fails the test.
exec.shutdown();
assertTrue(exec.awaitTermination(10000L,TimeUnit.MILLISECONDS));
}
// Verify the aggregated view: setters are idempotent across the 25
// threads per combination, while counters accumulate (25 * 100 = 2500).
StartupProgressView view=startupProgress.createView();
assertNotNull(view);
assertEquals("file1",view.getFile(LOADING_FSIMAGE));
assertEquals(1000L,view.getSize(LOADING_FSIMAGE));
assertEquals(10000L,view.getTotal(LOADING_FSIMAGE,new Step(INODES)));
assertEquals(2500L,view.getCount(LOADING_FSIMAGE,new Step(INODES)));
assertEquals(20000L,view.getTotal(LOADING_FSIMAGE,new Step(DELEGATION_KEYS)));
assertEquals(2500L,view.getCount(LOADING_FSIMAGE,new Step(DELEGATION_KEYS)));
assertEquals("file2",view.getFile(LOADING_EDITS));
assertEquals(2000L,view.getSize(LOADING_EDITS));
assertEquals(30000L,view.getTotal(LOADING_EDITS,new Step(INODES)));
assertEquals(2500L,view.getCount(LOADING_EDITS,new Step(INODES)));
assertEquals(40000L,view.getTotal(LOADING_EDITS,new Step(DELEGATION_KEYS)));
assertEquals(2500L,view.getCount(LOADING_EDITS,new Step(DELEGATION_KEYS)));
}
APIUtilityVerifier BooleanVerifier
// Verifies startup-progress metrics while startup is mid-flight: fsimage
// loading is complete (100/100), edits loading is half done (100/200), and
// the later phases (checkpoint save, safe mode) have not started, so their
// counters are zero and overall PercentComplete is 0.375.
@Test public void testRunningState(){
setStartupProgressForRunningState(startupProgress);
MetricsRecordBuilder builder=getMetrics(metrics,true);
// Elapsed times are wall-clock driven, so only assert non-negativity.
assertTrue(getLongCounter("ElapsedTime",builder) >= 0L);
assertGauge("PercentComplete",0.375f,builder);
assertCounter("LoadingFsImageCount",100L,builder);
assertTrue(getLongCounter("LoadingFsImageElapsedTime",builder) >= 0L);
assertCounter("LoadingFsImageTotal",100L,builder);
assertGauge("LoadingFsImagePercentComplete",1.0f,builder);
assertCounter("LoadingEditsCount",100L,builder);
assertTrue(getLongCounter("LoadingEditsElapsedTime",builder) >= 0L);
assertCounter("LoadingEditsTotal",200L,builder);
assertGauge("LoadingEditsPercentComplete",0.5f,builder);
// Phases not yet started report all-zero metrics.
assertCounter("SavingCheckpointCount",0L,builder);
assertCounter("SavingCheckpointElapsedTime",0L,builder);
assertCounter("SavingCheckpointTotal",0L,builder);
assertGauge("SavingCheckpointPercentComplete",0.0f,builder);
assertCounter("SafeModeCount",0L,builder);
assertCounter("SafeModeElapsedTime",0L,builder);
assertCounter("SafeModeTotal",0L,builder);
assertGauge("SafeModePercentComplete",0.0f,builder);
}
APIUtilityVerifier BooleanVerifier
// Verifies startup-progress metrics after startup has fully completed:
// every phase reports count == total and 100% completion, and the overall
// PercentComplete gauge is 1.0.
@Test public void testFinalState(){
setStartupProgressForFinalState(startupProgress);
MetricsRecordBuilder builder=getMetrics(metrics,true);
// Elapsed times are wall-clock driven, so only assert non-negativity.
assertTrue(getLongCounter("ElapsedTime",builder) >= 0L);
assertGauge("PercentComplete",1.0f,builder);
assertCounter("LoadingFsImageCount",100L,builder);
assertTrue(getLongCounter("LoadingFsImageElapsedTime",builder) >= 0L);
assertCounter("LoadingFsImageTotal",100L,builder);
assertGauge("LoadingFsImagePercentComplete",1.0f,builder);
assertCounter("LoadingEditsCount",200L,builder);
assertTrue(getLongCounter("LoadingEditsElapsedTime",builder) >= 0L);
assertCounter("LoadingEditsTotal",200L,builder);
assertGauge("LoadingEditsPercentComplete",1.0f,builder);
assertCounter("SavingCheckpointCount",300L,builder);
assertTrue(getLongCounter("SavingCheckpointElapsedTime",builder) >= 0L);
assertCounter("SavingCheckpointTotal",300L,builder);
assertGauge("SavingCheckpointPercentComplete",1.0f,builder);
assertCounter("SafeModeCount",400L,builder);
assertTrue(getLongCounter("SafeModeElapsedTime",builder) >= 0L);
assertCounter("SafeModeTotal",400L,builder);
assertGauge("SafeModePercentComplete",1.0f,builder);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier
// Verifies WebHDFS datanode selection honors data locality: for a CREATE the
// chosen datanode is the local one; for checksum/open/append on an existing
// single-replica file, the chosen datanode is the replica's host.
@Test public void testDataLocality() throws Exception {
final Configuration conf=WebHdfsTestUtil.createConf();
final String[] racks={RACK0,RACK0,RACK1,RACK1,RACK2,RACK2};
final int nDataNodes=racks.length;
LOG.info("nDataNodes=" + nDataNodes + ", racks="+ Arrays.asList(racks));
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(nDataNodes).racks(racks).build();
try {
cluster.waitActive();
final DistributedFileSystem dfs=cluster.getFileSystem();
final NameNode namenode=cluster.getNameNode();
final DatanodeManager dm=namenode.getNamesystem().getBlockManager().getDatanodeManager();
LOG.info("dm=" + dm);
final long blocksize=DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
final String f="/foo";
{
// CREATE for a not-yet-existing file: each datanode, acting as the
// "client", should be chosen itself (write locality).
for (int i=0; i < nDataNodes; i++) {
final DataNode dn=cluster.getDataNodes().get(i);
final String ipAddr=dm.getDatanode(dn.getDatanodeId()).getIpAddr();
final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,PutOpParam.Op.CREATE,-1L,blocksize,null);
Assert.assertEquals(ipAddr,chosen.getIpAddr());
}
}
// Write a 1-byte file with a single replica so there is exactly one
// "correct" datanode for subsequent read-side operations.
final Path p=new Path(f);
final FSDataOutputStream out=dfs.create(p,(short)1);
out.write(1);
out.close();
final LocatedBlocks locatedblocks=NameNodeAdapter.getBlockLocations(namenode,f,0,1);
final List lb=locatedblocks.getLocatedBlocks();
Assert.assertEquals(1,lb.size());
final DatanodeInfo[] locations=lb.get(0).getLocations();
Assert.assertEquals(1,locations.length);
final DatanodeInfo expected=locations[0];
{
// GETFILECHECKSUM must go to the replica holder.
final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,GetOpParam.Op.GETFILECHECKSUM,-1L,blocksize,null);
Assert.assertEquals(expected,chosen);
}
{
// OPEN at offset 0 must go to the replica holder.
final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,GetOpParam.Op.OPEN,0,blocksize,null);
Assert.assertEquals(expected,chosen);
}
{
// APPEND must go to the replica holder.
final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,PostOpParam.Op.APPEND,-1L,blocksize,null);
Assert.assertEquals(expected,chosen);
}
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier
// Verifies WebHDFS datanode selection honors the exclude list: after writing
// a 3-replica file, progressively exclude 1 then 2 of its replica hosts and
// check that chooseDatanode never returns an excluded host for checksum,
// open, or append operations.
@Test public void testExcludeDataNodes() throws Exception {
final Configuration conf=WebHdfsTestUtil.createConf();
final String[] racks={RACK0,RACK0,RACK1,RACK1,RACK2,RACK2};
final String[] hosts={"DataNode1","DataNode2","DataNode3","DataNode4","DataNode5","DataNode6"};
final int nDataNodes=hosts.length;
LOG.info("nDataNodes=" + nDataNodes + ", racks="+ Arrays.asList(racks)+ ", hosts="+ Arrays.asList(hosts));
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).hosts(hosts).numDataNodes(nDataNodes).racks(racks).build();
try {
cluster.waitActive();
final DistributedFileSystem dfs=cluster.getFileSystem();
final NameNode namenode=cluster.getNameNode();
final DatanodeManager dm=namenode.getNamesystem().getBlockManager().getDatanodeManager();
LOG.info("dm=" + dm);
final long blocksize=DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
final String f="/foo";
// Write a 1-byte file with 3 replicas so there are hosts to exclude.
final Path p=new Path(f);
final FSDataOutputStream out=dfs.create(p,(short)3);
out.write(1);
out.close();
final LocatedBlocks locatedblocks=NameNodeAdapter.getBlockLocations(namenode,f,0,1);
final List lb=locatedblocks.getLocatedBlocks();
Assert.assertEquals(1,lb.size());
final DatanodeInfo[] locations=lb.get(0).getLocations();
Assert.assertEquals(3,locations.length);
// StringBuilder instead of StringBuffer: the accumulator is purely
// local and single-threaded, so the synchronized variant is overkill.
StringBuilder sb=new StringBuilder();
for (int i=0; i < 2; i++) {
// Grow the comma-separated exclude list by one replica host per pass.
sb.append(locations[i].getXferAddr());
{
final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,GetOpParam.Op.GETFILECHECKSUM,-1L,blocksize,sb.toString());
for (int j=0; j <= i; j++) {
Assert.assertNotEquals(locations[j].getHostName(),chosen.getHostName());
}
}
{
final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,GetOpParam.Op.OPEN,0,blocksize,sb.toString());
for (int j=0; j <= i; j++) {
Assert.assertNotEquals(locations[j].getHostName(),chosen.getHostName());
}
}
{
final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,PostOpParam.Op.APPEND,-1L,blocksize,sb.toString());
for (int j=0; j <= i; j++) {
Assert.assertNotEquals(locations[j].getHostName(),chosen.getHostName());
}
}
sb.append(",");
}
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
// Verifies that a short-circuit replica's shared-memory slot is invalidated
// when its datanode shuts down: the slot is valid while the DN is up and
// must become invalid after the DN stops.
@Test(timeout=60000) public void testShmBasedStaleness() throws Exception {
BlockReaderTestUtil.enableShortCircuitShmTracing();
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
Configuration conf=createShortCircuitConf("testShmBasedStaleness",sockDir);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs=cluster.getFileSystem();
final ShortCircuitCache cache=fs.getClient().getClientContext().getShortCircuitCache();
String TEST_FILE="/test_file";
final int TEST_FILE_LEN=8193;
final int SEED=0xFADED;
DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
// Read one byte so the client loads a short-circuit replica for the block.
FSDataInputStream fis=fs.open(new Path(TEST_FILE));
int first=fis.read();
final ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,new Path(TEST_FILE));
Assert.assertTrue(first != -1);
cache.accept(new CacheVisitor(){
@Override public void visit( int numOutstandingMmaps, Map replicas, Map failedLoads, Map evictable, Map evictableMmapped){
// While the datanode is alive the replica's slot must be valid.
ShortCircuitReplica replica=replicas.get(ExtendedBlockId.fromExtendedBlock(block));
Assert.assertNotNull(replica);
Assert.assertTrue(replica.getSlot().isValid());
}
}
);
// Stop the only datanode; the shm-based staleness tracking should mark
// the slot invalid.
cluster.getDataNodes().get(0).shutdown();
cache.accept(new CacheVisitor(){
@Override public void visit( int numOutstandingMmaps, Map replicas, Map failedLoads, Map evictable, Map evictableMmapped){
ShortCircuitReplica replica=replicas.get(ExtendedBlockId.fromExtendedBlock(block));
Assert.assertNotNull(replica);
Assert.assertFalse(replica.getSlot().isValid());
}
}
);
// Fix: release resources that were previously leaked. The sibling
// short-circuit tests (e.g. testAllocShm) close sockDir on success.
fis.close();
cluster.shutdown();
sockDir.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test unlinking a file whose blocks we are caching in the DFSClient.
 * The DataNode will notify the DFSClient that the replica is stale via the
 * ShortCircuitShm.
 */
@Test(timeout=60000) public void testUnlinkingReplicasInFileDescriptorCache() throws Exception {
BlockReaderTestUtil.enableShortCircuitShmTracing();
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
Configuration conf=createShortCircuitConf("testUnlinkingReplicasInFileDescriptorCache",sockDir);
// Effectively disable stream-cache expiry so eviction cannot race the test.
conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,1000000000L);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs=cluster.getFileSystem();
final ShortCircuitCache cache=fs.getClient().getClientContext().getShortCircuitCache();
// Before any reads, the client should have no per-datanode shm state.
cache.getDfsClientShmManager().visit(new Visitor(){
@Override public void visit( HashMap info) throws IOException {
Assert.assertEquals(0,info.size());
}
}
);
final Path TEST_PATH=new Path("/test_file");
final int TEST_FILE_LEN=8193;
final int SEED=0xFADE0;
DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LEN,(short)1,SEED);
// Read the file (short-circuit) and verify the contents match the seed.
byte contents[]=DFSTestUtil.readFileBuffer(fs,TEST_PATH);
byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents,expected));
final DatanodeInfo datanode=new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
// After the read there should be exactly one not-full, connected shm
// segment for the single datanode.
cache.getDfsClientShmManager().visit(new Visitor(){
@Override public void visit( HashMap info) throws IOException {
Assert.assertTrue(info.get(datanode).full.isEmpty());
Assert.assertFalse(info.get(datanode).disabled);
Assert.assertEquals(1,info.get(datanode).notFull.values().size());
DfsClientShm shm=info.get(datanode).notFull.values().iterator().next();
Assert.assertFalse(shm.isDisconnected());
}
}
);
// Deleting the file should eventually invalidate every slot in the shm.
fs.delete(TEST_PATH,false);
GenericTestUtils.waitFor(new Supplier(){
MutableBoolean done=new MutableBoolean(true);
@Override public Boolean get(){
try {
// Assume success, then flip to false if any slot is still valid.
done.setValue(true);
cache.getDfsClientShmManager().visit(new Visitor(){
@Override public void visit( HashMap info) throws IOException {
Assert.assertTrue(info.get(datanode).full.isEmpty());
Assert.assertFalse(info.get(datanode).disabled);
Assert.assertEquals(1,info.get(datanode).notFull.values().size());
DfsClientShm shm=info.get(datanode).notFull.values().iterator().next();
for (Iterator iter=shm.slotIterator(); iter.hasNext(); ) {
Slot slot=iter.next();
if (slot.isValid()) {
done.setValue(false);
}
}
}
}
);
}
catch ( IOException e) {
LOG.error("error running visitor",e);
}
return done.booleanValue();
}
}
,10,60000);
cluster.shutdown();
sockDir.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies shared-memory slot allocation and release in the short-circuit
// cache: allocating a slot creates one not-full shm segment for the
// datanode, and scheduling its release eventually empties all segments.
@Test(timeout=60000) public void testAllocShm() throws Exception {
BlockReaderTestUtil.enableShortCircuitShmTracing();
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
Configuration conf=createShortCircuitConf("testAllocShm",sockDir);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs=cluster.getFileSystem();
final ShortCircuitCache cache=fs.getClient().getClientContext().getShortCircuitCache();
// No shm state should exist before the first allocation.
cache.getDfsClientShmManager().visit(new Visitor(){
@Override public void visit( HashMap info) throws IOException {
Assert.assertEquals(0,info.size());
}
}
);
DomainPeer peer=getDomainPeerToDn(conf);
MutableBoolean usedPeer=new MutableBoolean(false);
ExtendedBlockId blockId=new ExtendedBlockId(123,"xyz");
final DatanodeInfo datanode=new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
// Allocate a slot; the domain peer must have been used to set it up.
Slot slot=cache.allocShmSlot(datanode,peer,usedPeer,blockId,"testAllocShm_client");
Assert.assertNotNull(slot);
Assert.assertTrue(usedPeer.booleanValue());
cache.getDfsClientShmManager().visit(new Visitor(){
@Override public void visit( HashMap info) throws IOException {
// Exactly one enabled, not-full segment for the single datanode.
Assert.assertEquals(1,info.size());
PerDatanodeVisitorInfo vinfo=info.get(datanode);
Assert.assertFalse(vinfo.disabled);
Assert.assertEquals(0,vinfo.full.size());
Assert.assertEquals(1,vinfo.notFull.size());
}
}
);
// Release is asynchronous, so poll until both segment sets are empty.
cache.scheduleSlotReleaser(slot);
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
final MutableBoolean done=new MutableBoolean(false);
try {
cache.getDfsClientShmManager().visit(new Visitor(){
@Override public void visit( HashMap info) throws IOException {
done.setValue(info.get(datanode).full.isEmpty() && info.get(datanode).notFull.isEmpty());
}
}
);
}
catch ( IOException e) {
LOG.error("error running visitor",e);
}
return done.booleanValue();
}
}
,10,60000);
cluster.shutdown();
sockDir.close();
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Exercises ShortCircuitShm slot lifecycle on a real shared-memory file:
// fill the segment with slots, iterate them, cycle anchor state on each,
// then unregister/invalidate everything and free the segment.
@Test(timeout=60000) public void testAllocateSlots() throws Exception {
File path=new File(TEST_BASE,"testAllocateSlots");
path.mkdirs();
SharedFileDescriptorFactory factory=SharedFileDescriptorFactory.create("shm_",new String[]{path.getAbsolutePath()});
FileInputStream stream=factory.createDescriptor("testAllocateSlots",4096);
ShortCircuitShm shm=new ShortCircuitShm(ShmId.createRandom(),stream);
int numSlots=0;
ArrayList slots=new ArrayList();
// Allocate slots until the 4096-byte segment reports full.
while (!shm.isFull()) {
Slot slot=shm.allocAndRegisterSlot(new ExtendedBlockId(123L,"test_bp1"));
slots.add(slot);
numSlots++;
}
LOG.info("allocated " + numSlots + " slots before running out.");
int slotIdx=0;
// The shm's iterator must yield exactly the slots we allocated.
for (Iterator iter=shm.slotIterator(); iter.hasNext(); ) {
Assert.assertTrue(slots.contains(iter.next()));
}
// Fresh slots are not anchorable, and indices are sequential from 0.
for ( Slot slot : slots) {
Assert.assertFalse(slot.addAnchor());
Assert.assertEquals(slotIdx++,slot.getSlotIdx());
}
// After makeAnchorable, anchoring must succeed on every slot.
for ( Slot slot : slots) {
slot.makeAnchorable();
}
for ( Slot slot : slots) {
Assert.assertTrue(slot.addAnchor());
}
for ( Slot slot : slots) {
slot.removeAnchor();
}
// Tear down: unregister and invalidate each slot, then free the shm.
for ( Slot slot : slots) {
shm.unregisterSlot(slot.getSlotIdx());
slot.makeInvalid();
}
shm.free();
stream.close();
FileUtil.fullyDelete(path);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test failover with various options
 */
@Test public void testFencer() throws Exception {
// Failover fails (-1) before any namenode has been transitioned active.
assertEquals(-1,runTool("-failover","nn1","nn2"));
// The shell fencer echoes its substituted variables into this temp file,
// letting us observe whether (and with what arguments) fencing ran.
File tmpFile=File.createTempFile("testFencer",".txt");
tmpFile.deleteOnExit();
if (Shell.WINDOWS) {
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,"shell(echo %target_nameserviceid%.%target_namenodeid% " + "%target_port% %dfs_ha_namenode_id% > " + tmpFile.getAbsolutePath() + ")");
}
else {
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,"shell(echo -n $target_nameserviceid.$target_namenodeid " + "$target_port $dfs_ha_namenode_id > " + tmpFile.getAbsolutePath() + ")");
}
tool.setConf(conf);
// Graceful failovers succeed and must NOT invoke the fencer (file empty).
assertEquals(0,runTool("-transitionToActive","nn1"));
assertEquals(0,runTool("-failover","nn1","nn2"))
;
assertEquals(0,runTool("-ns","minidfs-ns","-failover","nn2","nn1"));
assertEquals("",Files.toString(tmpFile,Charsets.UTF_8));
// --forcefence runs the fencer; verify the substituted target variables.
assertEquals(0,runTool("-failover","nn1","nn2","--forcefence"));
String fenceCommandOutput=Files.toString(tmpFile,Charsets.UTF_8).replaceAll(" *[\r\n]+","");
assertEquals("minidfs-ns.nn1 " + nn1Port + " nn1",fenceCommandOutput);
tmpFile.delete();
// --forceactive does not fence.
assertEquals(0,runTool("-failover","nn2","nn1","--forceactive"));
assertFalse(tmpFile.exists());
// With no fencer configured, forced fencing must fail.
conf.unset(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY);
tool.setConf(conf);
assertEquals(-1,runTool("-failover","nn1","nn2","--forcefence"));
assertFalse(tmpFile.exists());
// With an unparseable fencer, forced fencing must fail.
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,"foobar!");
tool.setConf(conf);
assertEquals(-1,runTool("-failover","nn1","nn2","--forcefence"));
assertFalse(tmpFile.exists());
// Flag position should not matter: --forcefence before the hosts.
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,TestDFSHAAdmin.getFencerTrueCommand());
tool.setConf(conf);
assertEquals(0,runTool("-failover","--forcefence","nn1","nn2"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Call fetch token using http server
 */
@Test public void expectedTokenIsRetrievedFromHttp() throws Exception {
// Mock a WebHdfsFileSystem that hands back a known token.
final Token testToken=new Token("id".getBytes(),"pwd".getBytes(),FakeRenewer.KIND,new Text("127.0.0.1:1234"));
WebHdfsFileSystem fs=mock(WebHdfsFileSystem.class);
doReturn(testToken).when(fs).getDelegationToken(anyString());
// Save the token to a credentials file and read it back.
Path p=new Path(f.getRoot().getAbsolutePath(),tokenFile);
DelegationTokenFetcher.saveDelegationToken(conf,fs,null,p);
Credentials creds=Credentials.readTokenStorageFile(p,conf);
Iterator> itr=creds.getAllTokens().iterator();
assertTrue("token not exist error",itr.hasNext());
// The persisted token must round-trip identifier and password intact.
Token> fetchedToken=itr.next();
Assert.assertArrayEquals("token wrong identifier error",testToken.getIdentifier(),fetchedToken.getIdentifier());
Assert.assertArrayEquals("token wrong password error",testToken.getPassword(),fetchedToken.getPassword());
// Renew/cancel must route the same token through FakeRenewer.
DelegationTokenFetcher.renewTokens(conf,p);
Assert.assertEquals(testToken,FakeRenewer.getLastRenewed());
DelegationTokenFetcher.cancelTokens(conf,p);
Assert.assertEquals(testToken,FakeRenewer.getLastCanceled());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies that "getconf -excludeFile" prints the configured
// dfs.hosts.exclude path.
@Test public void TestGetConfExcludeCommand() throws Exception {
HdfsConfiguration conf=new HdfsConfiguration();
localFileSys=FileSystem.getLocal(conf);
// Stage empty hosts/exclude files under the test data directory.
Path cwd=localFileSys.getWorkingDirectory();
Path testDir=new Path(cwd,System.getProperty("test.build.data","target/test/data") + "/Getconf/");
Path hostsFile=new Path(testDir,"hosts");
Path excludeFile=new Path(testDir,"exclude");
conf.set(DFSConfigKeys.DFS_HOSTS,hostsFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE,excludeFile.toUri().getPath());
writeConfigFile(hostsFile,null);
writeConfigFile(excludeFile,null);
// The tool's output (trimmed) must equal the configured exclude path.
String[] arguments={"-excludeFile"};
String output=runTool(conf,arguments,true);
assertEquals(excludeFile.toUri().getPath(),output.trim());
cleanupFile(localFileSys,excludeFile.getParent());
}
APIUtilityVerifier BooleanVerifier
/**
 * Test invalid argument to the tool
 */
@Test(timeout=10000) public void testInvalidArgument() throws Exception {
HdfsConfiguration conf=new HdfsConfiguration();
// An unknown flag should make the tool print its usage text.
String[] arguments={"-invalidArgument"};
String output=runTool(conf,arguments,false);
assertTrue(output.contains(GetConf.USAGE));
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * Test empty configuration
 */
@Test(timeout=10000) public void testEmptyConf() throws Exception {
// Start from a configuration with no defaults loaded.
HdfsConfiguration conf=new HdfsConfiguration(false);
// Address lookups are expected to fail (success flag = false) on an
// empty configuration.
getAddressListFromTool(TestType.NAMENODE,conf,false);
System.out.println(getAddressListFromTool(TestType.BACKUP,conf,false));
getAddressListFromTool(TestType.SECONDARY,conf,false);
getAddressListFromTool(TestType.NNRPCADDRESSES,conf,false);
// Every declared command must have a handler; key-backed commands are
// additionally run against the empty configuration.
for ( Command cmd : Command.values()) {
String arg=cmd.getName();
CommandHandler handler=Command.getHandler(arg);
assertNotNull("missing handler: " + cmd,handler);
if (handler.key != null) {
String[] args={handler.key};
runTool(conf,args,false);
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies "-confKey" prints the trimmed value of a configured key
// followed by a platform newline.
@Test(timeout=10000) public void testGetSpecificKey() throws Exception {
HdfsConfiguration conf=new HdfsConfiguration();
// Value is padded with spaces; the tool is expected to trim it.
conf.set("mykey"," myval ");
String[] arguments={"-confKey","mykey"};
String output=runTool(conf,arguments,true);
assertEquals(String.format("myval%n"),output);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies that "getconf -includeFile" prints the configured dfs.hosts path.
@Test public void TestGetConfIncludeCommand() throws Exception {
HdfsConfiguration conf=new HdfsConfiguration();
localFileSys=FileSystem.getLocal(conf);
// Stage empty hosts/exclude files under the test data directory.
Path cwd=localFileSys.getWorkingDirectory();
Path testDir=new Path(cwd,System.getProperty("test.build.data","target/test/data") + "/Getconf/");
Path hostsFile=new Path(testDir,"hosts");
Path excludeFile=new Path(testDir,"exclude");
conf.set(DFSConfigKeys.DFS_HOSTS,hostsFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE,excludeFile.toUri().getPath());
writeConfigFile(hostsFile,null);
writeConfigFile(excludeFile,null);
// The tool's output (trimmed) must equal the configured hosts path.
String[] arguments={"-includeFile"};
String output=runTool(conf,arguments,true);
assertEquals(hostsFile.toUri().getPath(),output.trim());
cleanupFile(localFileSys,excludeFile.getParent());
}
APIUtilityVerifier EqualityVerifier
// Runs the offline image viewer's FileDistribution processor with explicit
// -maxSize/-step options and expects a clean (0) exit status.
@Test public void testFileDistributionCalculatorWithOptions() throws IOException {
String[] args={"-i",originalFsimage.getAbsolutePath(),"-o","-","-p","FileDistribution","-maxSize","512","-step","8"};
int status=OfflineImageViewerPB.run(args);
assertEquals(0,status);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// End-to-end test of WebImageViewer: serve a saved fsimage over WebHDFS and
// verify LISTSTATUS/GETFILESTATUS results plus HTTP error codes for invalid
// paths, operations, and methods.
@Test public void testWebImageViewer() throws IOException, InterruptedException, URISyntaxException {
// Port 0 -> pick any free port; the real port is read back below.
WebImageViewer viewer=new WebImageViewer(NetUtils.createSocketAddr("localhost:0"));
try {
viewer.initServer(originalFsimage.getAbsolutePath());
int port=viewer.getPort();
// Access the served image through a regular WebHdfsFileSystem client.
URI uri=new URI("webhdfs://localhost:" + String.valueOf(port));
Configuration conf=new Configuration();
WebHdfsFileSystem webhdfs=(WebHdfsFileSystem)FileSystem.get(uri,conf);
FileStatus[] statuses=webhdfs.listStatus(new Path("/"));
assertEquals(NUM_DIRS + 2,statuses.length);
statuses=webhdfs.listStatus(new Path("/dir0"));
assertEquals(FILES_PER_DIR,statuses.length);
// Listing a file returns a single status that must match what the test
// fixture recorded when the file was written.
FileStatus status=webhdfs.listStatus(new Path("/dir0/file0"))[0];
FileStatus expected=writtenFiles.get("/dir0/file0");
compareFile(expected,status);
statuses=webhdfs.listStatus(new Path("/emptydir"));
assertEquals(0,statuses.length);
// Invalid path and missing path component both yield 404.
URL url=new URL("http://localhost:" + port + "/webhdfs/v1/invalid/?op=LISTSTATUS");
verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND,url);
url=new URL("http://localhost:" + port + "/webhdfs/v1?op=LISTSTATUS");
verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND,url);
status=webhdfs.getFileStatus(new Path("/dir0/file0"));
compareFile(expected,status);
url=new URL("http://localhost:" + port + "/webhdfs/v1/invalid/?op=GETFILESTATUS");
verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND,url);
// Unknown op -> 400 Bad Request.
url=new URL("http://localhost:" + port + "/webhdfs/v1/?op=INVALID");
verifyHttpResponseCode(HttpURLConnection.HTTP_BAD_REQUEST,url);
// POST to a GET-only op -> 405 Bad Method.
url=new URL("http://localhost:" + port + "/webhdfs/v1/?op=LISTSTATUS");
HttpURLConnection connection=(HttpURLConnection)url.openConnection();
connection.setRequestMethod("POST");
connection.connect();
assertEquals(HttpURLConnection.HTTP_BAD_METHOD,connection.getResponseCode());
}
finally {
viewer.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Runs FileDistributionCalculator over the saved fsimage and scrapes its
// text report for totalFiles, totalDirectories and maxFileSize, comparing
// each against what the test fixture actually wrote.
@Test public void testFileDistributionCalculator() throws IOException {
StringWriter output=new StringWriter();
PrintWriter o=new PrintWriter(output);
new FileDistributionCalculator(new Configuration(),0,0,o).visit(new RandomAccessFile(originalFsimage,"r"));
o.close();
// totalFiles must equal the number of files the fixture created.
Pattern p=Pattern.compile("totalFiles = (\\d+)\n");
Matcher matcher=p.matcher(output.getBuffer());
assertTrue(matcher.find() && matcher.groupCount() == 1);
int totalFiles=Integer.parseInt(matcher.group(1));
assertEquals(NUM_DIRS * FILES_PER_DIR,totalFiles);
// totalDirectories includes 3 extra dirs beyond the NUM_DIRS fixture dirs.
p=Pattern.compile("totalDirectories = (\\d+)\n");
matcher=p.matcher(output.getBuffer());
assertTrue(matcher.find() && matcher.groupCount() == 1);
int totalDirs=Integer.parseInt(matcher.group(1));
assertEquals(NUM_DIRS + 3,totalDirs);
// Find the largest written file; Long.compare replaces the hand-rolled
// three-way comparison (same ordering, clearer and overflow-safe).
FileStatus maxFile=Collections.max(writtenFiles.values(),new Comparator(){
@Override public int compare( FileStatus first, FileStatus second){
return Long.compare(first.getLen(),second.getLen());
}
}
);
p=Pattern.compile("maxFileSize = (\\d+)\n");
matcher=p.matcher(output.getBuffer());
assertTrue(matcher.find() && matcher.groupCount() == 1);
assertEquals(maxFile.getLen(),Long.parseLong(matcher.group(1)));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Reads ACL status for several pre-created paths through the WebHDFS view
 * of an offline fsimage and checks each against the ACLs recorded at write
 * time; also verifies GETACLSTATUS on a missing path yields 404.
 */
@Test public void testWebImageViewerForAcl() throws IOException, InterruptedException, URISyntaxException {
WebImageViewer imageViewer=new WebImageViewer(NetUtils.createSocketAddr("localhost:0"));
try {
imageViewer.initServer(originalFsimage.getAbsolutePath());
int httpPort=imageViewer.getPort();
URI fsUri=new URI("webhdfs://localhost:" + String.valueOf(httpPort));
Configuration clientConf=new Configuration();
WebHdfsFileSystem webhdfs=(WebHdfsFileSystem)FileSystem.get(fsUri,clientConf);
// Each queried path must report exactly the ACL status captured when it
// was written into the image (same order as the original assertions).
String[] aclPaths={"/dirWithNoAcl","/dirWithDefaultAcl","/noAcl","/withAcl","/withSeveralAcls"};
for ( String aclPath : aclPaths) {
AclStatus acl=webhdfs.getAclStatus(new Path(aclPath));
assertEquals(writtenAcls.get(aclPath),acl);
}
// GETACLSTATUS against a nonexistent path must return 404.
URL badUrl=new URL("http://localhost:" + httpPort + "/webhdfs/v1/invalid/?op=GETACLSTATUS");
HttpURLConnection connection=(HttpURLConnection)badUrl.openConnection();
connection.setRequestMethod("GET");
connection.connect();
assertEquals(HttpURLConnection.HTTP_NOT_FOUND,connection.getResponseCode());
}
finally {
imageViewer.shutdown();
}
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test case where the destination already exists as an empty file: the old
 * (empty) content must stay visible until close() atomically commits the
 * newly written data.
 */
@Test public void testOverwriteFile() throws IOException {
assertTrue("Creating empty dst file",DST_FILE.createNewFile());
OutputStream out=new AtomicFileOutputStream(DST_FILE);
assertTrue("Empty file still exists",DST_FILE.exists());
out.write(TEST_STRING.getBytes());
out.flush();
// Data is still staged aside, so the destination reads back empty here.
assertEquals("",DFSTestUtil.readFile(DST_FILE));
out.close();
// close() commits the write; the new content is now visible.
String actual=DFSTestUtil.readFile(DST_FILE);
assertEquals(TEST_STRING,actual);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test case where there is no existing file: nothing becomes visible at
 * the destination path until close() commits the write.
 */
@Test public void testWriteNewFile() throws IOException {
OutputStream out=new AtomicFileOutputStream(DST_FILE);
assertFalse(DST_FILE.exists());
out.write(TEST_STRING.getBytes());
out.flush();
// Still staged: the destination must not exist before close().
assertFalse(DST_FILE.exists());
out.close();
assertTrue(DST_FILE.exists());
// Committed content must match exactly what was written.
String actual=DFSTestUtil.readFile(DST_FILE);
assertEquals(TEST_STRING,actual);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises BestEffortLongFile get/set: the initial get() yields the
 * supplied default, and every stored value must be readable both through
 * the writing instance and through a freshly opened second instance.
 */
@Test public void testGetSet() throws IOException {
BestEffortLongFile writer=new BestEffortLongFile(FILE,12345L);
try {
// First access returns the default passed to the constructor.
assertEquals(12345L,writer.get());
// The backing file is present on disk after the first access.
assertTrue(FILE.exists());
Random rng=new Random();
for (int round=0; round < 100; round++) {
long stored=rng.nextLong();
writer.set(stored);
assertEquals(stored,writer.get());
// A second instance with a different default (999) must read the
// stored value from disk rather than fall back to its default.
BestEffortLongFile reader=new BestEffortLongFile(FILE,999L);
try {
assertEquals(stored,reader.get());
}
finally {
IOUtils.closeStream(reader);
}
}
}
finally {
IOUtils.closeStream(writer);
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Basic multi-element behavior: each list element can be added exactly once
 * (a second add returns false without disturbing membership), and iteration
 * visits exactly the list's elements.
 */
@Test public void testMultiBasic(){
LOG.info("Test multi element basic");
for ( Integer i : list) {
assertTrue(set.add(i));
}
assertEquals(list.size(),set.size());
for ( Integer i : list) {
assertTrue(set.contains(i));
}
// Duplicate adds must be rejected...
for ( Integer i : list) {
assertFalse(set.add(i));
}
// ...while membership remains intact.
for ( Integer i : list) {
assertTrue(set.contains(i));
}
// Typed iterator restored: the <Integer> parameter had been stripped.
Iterator<Integer> iter=set.iterator();
int num=0;
while (iter.hasNext()) {
Integer next=iter.next();
assertNotNull(next);
assertTrue(list.contains(next));
num++;
}
// The iterator yields exactly as many elements as were inserted.
assertEquals(list.size(),num);
LOG.info("Test multi element basic - DONE");
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * getBookmark must return an iterator positioned where the previous
 * bookmark iterator stopped: after advancing half the elements, a second
 * bookmark starts at the element at that offset.
 */
@Test(timeout=60000) public void testGetBookmarkReturnsBookmarkIterator(){
LOG.info("Test getBookmark returns proper iterator");
assertTrue(set.addAll(list));
// Typed iterators restored: the <Integer> parameters had been stripped.
Iterator<Integer> bookmark=set.getBookmark();
assertEquals(bookmark.next(),list.get(0));
final int numAdvance=list.size() / 2;
for (int i=1; i < numAdvance; i++) {
bookmark.next();
}
// A fresh bookmark must resume from element numAdvance, not from 0.
Iterator<Integer> bookmark2=set.getBookmark();
assertEquals(bookmark2.next(),list.get(numAdvance));
}
APIUtilityVerifier EqualityVerifier
/** computeMd5ForFile must reproduce the digest recorded for the test file. */
@Test public void testComputeMd5ForFile() throws Exception {
final MD5Hash actualDigest=MD5FileUtils.computeMd5ForFile(TEST_FILE);
assertEquals(TEST_MD5,actualDigest);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * concat() must merge all source files into the target and remove the
 * sources; the target's length grows by the sum of the source lengths.
 */
@Test public void testConcat() throws Exception {
Path[] sources={new Path("/test/hadoop/file1"),new Path("/test/hadoop/file2"),new Path("/test/hadoop/file3")};
for ( Path source : sources) {
DFSTestUtil.createFile(fSys,source,1024,(short)3,0);
}
Path target=new Path("/test/hadoop/catFile");
DFSTestUtil.createFile(fSys,target,1024,(short)3,0);
Assert.assertTrue(exists(fSys,target));
fSys.concat(target,sources);
// All source files are consumed by the concat.
for ( Path source : sources) {
Assert.assertFalse(exists(fSys,source));
}
// Target now holds its original 1K plus the three 1K sources.
FileStatus targetStatus=fSys.getFileStatus(target);
Assert.assertEquals(1024 * 4,targetStatus.getLen());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Round-trips a single byte through a file created over the swebhdfs scheme. */
@Test public void testSWebHdfsFileSystem() throws Exception {
FileSystem swebFs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,"swebhdfs");
final Path testPath=new Path("/testswebhdfs");
FSDataOutputStream out=swebFs.create(testPath);
out.write(23);
out.close();
Assert.assertTrue(swebFs.exists(testPath));
InputStream in=swebFs.open(testPath);
// The single byte written above must come back unchanged.
Assert.assertEquals(23,in.read());
in.close();
swebFs.close();
}
APIUtilityVerifier IterativeVerifier EqualityVerifier
@Test public void testToXAttrMap() throws IOException {
String jsonString="{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," + "{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}";
Map,?> json=(Map,?>)JSON.parse(jsonString);
XAttr xAttr1=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).setName("a1").setValue(XAttrCodec.decodeValue("0x313233")).build();
XAttr xAttr2=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).setName("a2").setValue(XAttrCodec.decodeValue("0x313131")).build();
List xAttrs=Lists.newArrayList();
xAttrs.add(xAttr1);
xAttrs.add(xAttr2);
Map xAttrMap=XAttrHelper.buildXAttrMap(xAttrs);
Map parsedXAttrMap=JsonUtil.toXAttrs(json);
Assert.assertEquals(xAttrMap.size(),parsedXAttrMap.size());
Iterator> iter=xAttrMap.entrySet().iterator();
while (iter.hasNext()) {
Entry entry=iter.next();
Assert.assertArrayEquals(entry.getValue(),parsedXAttrMap.get(entry.getKey()));
}
}
APIUtilityVerifier EqualityVerifier
@Test public void testHdfsFileStatus(){
final long now=Time.now();
final String parent="/dir";
final HdfsFileStatus status=new HdfsFileStatus(1001L,false,3,1L << 26,now,now + 10,new FsPermission((short)0644),"user","group",DFSUtil.string2Bytes("bar"),DFSUtil.string2Bytes("foo"),INodeId.GRANDFATHER_INODE_ID,0,null);
final FileStatus fstatus=toFileStatus(status,parent);
System.out.println("status = " + status);
System.out.println("fstatus = " + fstatus);
final String json=JsonUtil.toJsonString(status,true);
System.out.println("json = " + json.replace(",",",\n "));
final HdfsFileStatus s2=JsonUtil.toFileStatus((Map,?>)JSON.parse(json),true);
final FileStatus fs2=toFileStatus(s2,parent);
System.out.println("s2 = " + s2);
System.out.println("fs2 = " + fs2);
Assert.assertEquals(fstatus,fs2);
}
APIUtilityVerifier EqualityVerifier
@Test public void testGetXAttrFromJson() throws IOException {
String jsonString="{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," + "{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}";
Map,?> json=(Map,?>)JSON.parse(jsonString);
byte[] value=JsonUtil.getXAttr(json,"user.a2");
Assert.assertArrayEquals(XAttrCodec.decodeValue("0x313131"),value);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Builds a DatanodeInfo from a JSON map keyed by the legacy "name" field,
 * checks the round trip back to JSON, and verifies that malformed or
 * missing names are rejected.
 */
@Test public void testToDatanodeInfoWithName() throws Exception {
// Typed map restored; uppercase L suffixes replace the easily-misread 'l'.
Map<String, Object> response=new HashMap<String, Object>();
String name="127.0.0.1:1004";
response.put("name",name);
response.put("hostName","localhost");
response.put("storageID","fake-id");
response.put("infoPort",1338L);
response.put("ipcPort",1339L);
response.put("capacity",1024L);
response.put("dfsUsed",512L);
response.put("remaining",512L);
response.put("blockPoolUsed",512L);
response.put("lastUpdate",0L);
response.put("xceiverCount",4096L);
response.put("networkLocation","foo.bar.baz");
response.put("adminState","NORMAL");
response.put("cacheCapacity",123L);
response.put("cacheUsed",321L);
DatanodeInfo di=JsonUtil.toDatanodeInfo(response);
Assert.assertEquals(name,di.getXferAddr());
Map<String, Object> r=JsonUtil.toJsonMap(di);
Assert.assertEquals(name,r.get("name"));
Assert.assertEquals("127.0.0.1",r.get("ipAddr"));
Assert.assertEquals(1004,(int)(Integer)r.get("xferPort"));
// Each malformed host:port spelling must make decoding fail.
String[] badNames={"127.0.0.1","127.0.0.1:",":","127.0.0.1:sweet",":123"};
for ( String badName : badNames) {
response.put("name",badName);
checkDecodeFailure(response);
}
// Removing "name" entirely must also fail, even with a bare ipAddr.
response.remove("name");
checkDecodeFailure(response);
response.put("ipAddr","127.0.0.1");
checkDecodeFailure(response);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testToAclStatus(){
String jsonString="{\"AclStatus\":{\"entries\":[\"user::rwx\",\"user:user1:rw-\",\"group::rw-\",\"other::r-x\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}";
Map,?> json=(Map,?>)JSON.parse(jsonString);
List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"user1",READ_WRITE),aclEntry(ACCESS,GROUP,READ_WRITE),aclEntry(ACCESS,OTHER,READ_EXECUTE));
AclStatus.Builder aclStatusBuilder=new AclStatus.Builder();
aclStatusBuilder.owner("testuser");
aclStatusBuilder.group("supergroup");
aclStatusBuilder.addEntries(aclSpec);
aclStatusBuilder.stickyBit(false);
Assert.assertEquals("Should be equal",aclStatusBuilder.build(),JsonUtil.toAclStatus(json));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier
/**
 * Verifies that TokenAspect replaces a token whose renewal fails: once the
 * renew cycle elapses the old RenewAction is invalidated, and the next call
 * to ensureTokenInitialized fetches and installs a fresh token.
 */
@SuppressWarnings("unchecked") @Test public void testRenewal() throws Exception {
Configuration conf=new Configuration();
// Wildcard token types restored; the raw declarations had been garbled.
Token<?> token1=mock(Token.class);
Token<?> token2=mock(Token.class);
final long renewCycle=100;
DelegationTokenRenewer.renewCycle=renewCycle;
UserGroupInformation ugi=UserGroupInformation.createUserForTesting("foo",new String[]{"bar"});
DummyFs fs=spy(new DummyFs());
// First fetch yields token1; the re-fetch after failed renewal yields token2.
doReturn(token1).doReturn(token2).when(fs).getDelegationToken(null);
doReturn(token1).when(fs).getRenewToken();
// Force the renewal path to fail so the token must be replaced.
doThrow(new IOException("renew failed")).when(token1).renew(conf);
doThrow(new IOException("get failed")).when(fs).addDelegationTokens(null,null);
final URI uri=new URI("dummyfs://127.0.0.1:1234");
TokenAspect<DummyFs> tokenAspect=new TokenAspect<DummyFs>(fs,SecurityUtil.buildTokenService(uri),DummyFs.TOKEN_KIND);
fs.initialize(uri,conf);
tokenAspect.initDelegationToken(ugi);
tokenAspect.ensureTokenInitialized();
DelegationTokenRenewer.RenewAction<?> action=getActionFromTokenAspect(tokenAspect);
verify(fs).setDelegationToken(token1);
assertTrue(action.isValid());
// Let at least one renew cycle pass so the failing renew() is attempted.
Thread.sleep(renewCycle * 2);
assertSame(action,getActionFromTokenAspect(tokenAspect));
assertFalse(action.isValid());
tokenAspect.ensureTokenInitialized();
verify(fs,times(2)).getDelegationToken(anyString());
verify(fs).setDelegationToken(token2);
// A new, valid RenewAction must be registered for the replacement token.
assertNotSame(action,getActionFromTokenAspect(tokenAspect));
action=getActionFromTokenAspect(tokenAspect);
assertTrue(action.isValid());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test snapshot deletion through WebHdfs: a named and a default-named
 * snapshot are created, verified visible, then deleted and verified gone.
 */
@Test public void testWebHdfsDeleteSnapshot() throws Exception {
MiniDFSCluster miniCluster=null;
final Configuration conf=WebHdfsTestUtil.createConf();
try {
miniCluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
miniCluster.waitActive();
final DistributedFileSystem dfs=miniCluster.getFileSystem();
final FileSystem webHdfs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME);
final Path snapshotDir=new Path("/foo");
dfs.mkdirs(snapshotDir);
dfs.allowSnapshot(snapshotDir);
webHdfs.createSnapshot(snapshotDir,"s1");
// Passing null lets the server choose a default snapshot name.
final Path defaultPath=webHdfs.createSnapshot(snapshotDir,null);
Assert.assertTrue(webHdfs.exists(defaultPath));
final Path namedPath=SnapshotTestHelper.getSnapshotRoot(snapshotDir,"s1");
Assert.assertTrue(webHdfs.exists(namedPath));
// Deleting each snapshot must remove its snapshot-root path.
webHdfs.deleteSnapshot(snapshotDir,"s1");
Assert.assertFalse(webHdfs.exists(namedPath));
webHdfs.deleteSnapshot(snapshotDir,defaultPath.getName());
Assert.assertFalse(webHdfs.exists(defaultPath));
}
finally {
if (miniCluster != null) {
miniCluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Listing a directory whose entry count exceeds the server-side list limit
 * must page correctly and still return every entry.
 */
@Test(timeout=300000) public void testLargeDirectory() throws Exception {
final Configuration conf=WebHdfsTestUtil.createConf();
final int listLimit=2;
// Force directory listings to page: each batch holds at most listLimit entries.
conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT,listLimit);
FsPermission.setUMask(conf,new FsPermission((short)0077));
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
try {
cluster.waitActive();
WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME).setPermission(new Path("/"),new FsPermission(FsAction.ALL,FsAction.ALL,FsAction.ALL));
// Run as a non-superuser so listing takes the normal permission path.
UserGroupInformation.setLoginUser(UserGroupInformation.createUserForTesting("not-superuser",new String[]{"not-supergroup"}));
// Type parameter <Void> restored on the action (raw type had been garbled).
UserGroupInformation.createUserForTesting("me",new String[]{"my-group"}).doAs(new PrivilegedExceptionAction<Void>(){
@Override public Void run() throws IOException, URISyntaxException {
FileSystem fs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME);
Path d=new Path("/my-dir");
Assert.assertTrue(fs.mkdirs(d));
// 3x the page size guarantees several paged LISTSTATUS round trips.
for (int i=0; i < listLimit * 3; i++) {
Path p=new Path(d,"file-" + i);
Assert.assertTrue(fs.createNewFile(p));
}
Assert.assertEquals(listLimit * 3,fs.listStatus(d).length);
return null;
}
}
);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * With a relaxed user-name pattern configured, a purely numerical user
 * ("123") must be able to perform WebHDFS operations.
 */
@Test(timeout=300000) public void testNumericalUserName() throws Exception {
final Configuration conf=WebHdfsTestUtil.createConf();
// Pattern additionally permits a leading digit, unlike the default.
conf.set(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,"^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$");
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
cluster.waitActive();
WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME).setPermission(new Path("/"),new FsPermission(FsAction.ALL,FsAction.ALL,FsAction.ALL));
// Type parameter <Void> restored on the action (raw type had been garbled).
UserGroupInformation.createUserForTesting("123",new String[]{"my-group"}).doAs(new PrivilegedExceptionAction<Void>(){
@Override public Void run() throws IOException, URISyntaxException {
FileSystem fs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME);
Path d=new Path("/my-dir");
Assert.assertTrue(fs.mkdirs(d));
return null;
}
}
);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test snapshot creation through WebHdfs: creating a snapshot on a
 * non-snapshottable directory must fail; once snapshots are allowed both a
 * named and a default-named snapshot must become visible.
 */
@Test public void testWebHdfsCreateSnapshot() throws Exception {
MiniDFSCluster miniCluster=null;
final Configuration conf=WebHdfsTestUtil.createConf();
try {
miniCluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
miniCluster.waitActive();
final DistributedFileSystem dfs=miniCluster.getFileSystem();
final FileSystem webHdfs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME);
final Path snapshotDir=new Path("/foo");
dfs.mkdirs(snapshotDir);
// Snapshots are not yet allowed on the directory, so this must fail.
try {
webHdfs.createSnapshot(snapshotDir);
fail("Cannot create snapshot on a non-snapshottable directory");
}
catch ( Exception e) {
GenericTestUtils.assertExceptionContains("Directory is not a snapshottable directory",e);
}
dfs.allowSnapshot(snapshotDir);
webHdfs.createSnapshot(snapshotDir,"s1");
// A null name makes the server pick a default snapshot name.
final Path defaultPath=webHdfs.createSnapshot(snapshotDir,null);
Assert.assertTrue(webHdfs.exists(defaultPath));
final Path namedPath=SnapshotTestHelper.getSnapshotRoot(snapshotDir,"s1");
Assert.assertTrue(webHdfs.exists(namedPath));
}
finally {
if (miniCluster != null) {
miniCluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test snapshot rename through WebHdfs: after the rename the old snapshot
 * path disappears and the new one exists; deletion then removes it too.
 */
@Test public void testWebHdfsRenameSnapshot() throws Exception {
MiniDFSCluster miniCluster=null;
final Configuration conf=WebHdfsTestUtil.createConf();
try {
miniCluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
miniCluster.waitActive();
final DistributedFileSystem dfs=miniCluster.getFileSystem();
final FileSystem webHdfs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME);
final Path snapshotDir=new Path("/foo");
dfs.mkdirs(snapshotDir);
dfs.allowSnapshot(snapshotDir);
webHdfs.createSnapshot(snapshotDir,"s1");
final Path oldPath=SnapshotTestHelper.getSnapshotRoot(snapshotDir,"s1");
Assert.assertTrue(webHdfs.exists(oldPath));
// Rename: s1 must vanish and s2 must appear.
webHdfs.renameSnapshot(snapshotDir,"s1","s2");
Assert.assertFalse(webHdfs.exists(oldPath));
final Path newPath=SnapshotTestHelper.getSnapshotRoot(snapshotDir,"s2");
Assert.assertTrue(webHdfs.exists(newPath));
webHdfs.deleteSnapshot(snapshotDir,"s2");
Assert.assertFalse(webHdfs.exists(newPath));
}
finally {
if (miniCluster != null) {
miniCluster.shutdown();
}
}
}
APIUtilityVerifier EqualityVerifier
/**
 * With a second (fake, remote) HA namespace configured alongside the real
 * one, the client must still resolve exactly the two NameNode addresses
 * belonging to its own logical name.
 */
@Test public void testMultipleNamespacesConfigured() throws Exception {
Configuration conf=DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
MiniDFSCluster miniCluster=null;
WebHdfsFileSystem webFs=null;
try {
miniCluster=new MiniDFSCluster.Builder(conf).nnTopology(topo).numDataNodes(1).build();
HATestUtil.setFailoverConfigurations(miniCluster,conf,LOGICAL_NAME);
miniCluster.waitActive();
// Add an unrelated remote namespace backed by fake HTTP addresses.
DFSTestUtil.addHAConfiguration(conf,LOGICAL_NAME + "remote");
DFSTestUtil.setFakeHttpAddresses(conf,LOGICAL_NAME + "remote");
webFs=(WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI,conf);
Assert.assertEquals(2,webFs.getResolvedNNAddr().length);
}
finally {
IOUtils.cleanup(null,webFs);
if (miniCluster != null) {
miniCluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * The webhdfs client must keep working across an HA failover: an operation
 * succeeds against NN0, and again after NN0 is stopped and NN1 activated.
 */
@Test public void testHA() throws IOException {
Configuration conf=DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
MiniDFSCluster miniCluster=null;
FileSystem webFs=null;
try {
miniCluster=new MiniDFSCluster.Builder(conf).nnTopology(topo).numDataNodes(0).build();
HATestUtil.setFailoverConfigurations(miniCluster,conf,LOGICAL_NAME);
miniCluster.waitActive();
webFs=FileSystem.get(WEBHDFS_URI,conf);
miniCluster.transitionToActive(0);
final Path firstDir=new Path("/test");
Assert.assertTrue(webFs.mkdirs(firstDir));
// Fail over: stop the active NameNode and promote the standby.
miniCluster.shutdownNameNode(0);
miniCluster.transitionToActive(1);
final Path secondDir=new Path("/test2");
Assert.assertTrue(webFs.mkdirs(secondDir));
}
finally {
IOUtils.cleanup(null,webFs);
if (miniCluster != null) {
miniCluster.shutdown();
}
}
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
/**
 * A redirected GET carries no request body, so neither the initial request
 * nor the follow-up to the redirect target may set Content-Length.
 */
@Test public void testGetOpWithRedirect(){
// Generic type restored on the futures capturing the observed header.
Future<String> future1=contentLengthFuture(redirectResponse);
Future<String> future2=contentLengthFuture(errResponse);
try {
fs.open(p).read();
Assert.fail();
}
catch ( IOException ioe) {
// expected: the stubbed server ends the exchange with an error
}
Assert.assertEquals(null,getContentLength(future1));
Assert.assertEquals(null,getContentLength(future2));
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
/**
 * A redirected create first issues a bodyless PUT (Content-Length: 0),
 * then streams the data to the redirect target with chunked encoding.
 */
@Test public void testPutOpWithRedirect(){
// Generic type restored on the futures capturing the observed header.
Future<String> future1=contentLengthFuture(redirectResponse);
Future<String> future2=contentLengthFuture(errResponse);
try {
FSDataOutputStream os=fs.create(p);
os.write(new byte[]{0});
os.close();
Assert.fail();
}
catch ( IOException ioe) {
// expected: the stubbed server ends the exchange with an error
}
Assert.assertEquals("0",getContentLength(future1));
Assert.assertEquals("chunked",getContentLength(future2));
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
/** DELETE has no request body, so no Content-Length header may be sent. */
@Test public void testDelete(){
// Generic type restored on the future capturing the observed header.
Future<String> future=contentLengthFuture(errResponse);
try {
fs.delete(p,false);
Assert.fail();
}
catch ( IOException ioe) {
// expected: the stubbed server replies with an error
}
Assert.assertEquals(null,getContentLength(future));
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
/** A bodyless PUT (mkdirs) must advertise Content-Length: 0. */
@Test public void testPutOp(){
// Generic type restored on the future capturing the observed header.
Future<String> future=contentLengthFuture(errResponse);
try {
fs.mkdirs(p);
Assert.fail();
}
catch ( IOException ioe) {
// expected: the stubbed server replies with an error
}
Assert.assertEquals("0",getContentLength(future));
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
/** A bodyless POST (concat) must advertise Content-Length: 0. */
@Test public void testPostOp(){
// Generic type restored on the future capturing the observed header.
Future<String> future=contentLengthFuture(errResponse);
try {
fs.concat(p,new Path[]{p});
Assert.fail();
}
catch ( IOException ioe) {
// expected: the stubbed server replies with an error
}
Assert.assertEquals("0",getContentLength(future));
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
/**
 * A redirected append first issues a bodyless POST (Content-Length: 0),
 * then streams the data to the redirect target with chunked encoding.
 */
@Test public void testPostOpWithRedirect(){
// Generic type restored on the futures capturing the observed header.
Future<String> future1=contentLengthFuture(redirectResponse);
Future<String> future2=contentLengthFuture(errResponse);
try {
FSDataOutputStream os=fs.append(p);
os.write(new byte[]{0});
os.close();
Assert.fail();
}
catch ( IOException ioe) {
// expected: the stubbed server ends the exchange with an error
}
Assert.assertEquals("0",getContentLength(future1));
Assert.assertEquals("chunked",getContentLength(future2));
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
/** A GET (getFileStatus) has no body, so no Content-Length may be sent. */
@Test public void testGetOp() throws Exception {
// Generic type restored on the future capturing the observed header.
Future<String> future=contentLengthFuture(errResponse);
try {
fs.getFileStatus(p);
Assert.fail();
}
catch ( IOException ioe) {
// expected: the stubbed server replies with an error
}
Assert.assertEquals(null,getContentLength(future));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A path that already contains percent-encoded characters must survive URL
 * construction unchanged (no double encoding or decoding).
 */
@Test(timeout=60000) public void testEncodedPathUrl() throws IOException, URISyntaxException {
final Configuration clientConf=new Configuration();
final WebHdfsFileSystem webhdfs=(WebHdfsFileSystem)FileSystem.get(uri,clientConf);
final String rawPathName="/hdtest010%2C60020%2C1371000602151.1371058984668";
final URL encodedPathUrl=webhdfs.toUrl(PutOpParam.Op.CREATE,new Path(rawPathName));
// The URL's path component must be the prefix plus the untouched raw path.
Assert.assertEquals(WebHdfsFileSystem.PATH_PREFIX + rawPathName,encodedPathUrl.toURI().getPath());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Exercises write, append and read through every webhdfs client in the
 * {@code webhdfs} array, covering the datanode-redirect paths for CREATE,
 * APPEND and OPEN.
 * NOTE(review): {@code webhdfs} and {@code createStrings} are fixtures
 * declared elsewhere in this class; each client appears to target its own
 * namespace (the same path is written per client and each assertion checks
 * that client's own payload) -- confirm against the fixture setup.
 */
@Test public void testRedirect() throws Exception {
final String dir="/testRedirect/";
final String filename="file";
final Path p=new Path(dir,filename);
final String[] writeStrings=createStrings("write to webhdfs ","write");
final String[] appendStrings=createStrings("append to webhdfs ","append");
// Phase 1: each client creates the file and writes its payload.
for (int i=0; i < webhdfs.length; i++) {
final FSDataOutputStream out=webhdfs[i].create(p);
out.write(writeStrings[i].getBytes());
out.close();
}
// Phase 2: the observed file length must equal that client's payload length.
for (int i=0; i < webhdfs.length; i++) {
final long expected=writeStrings[i].length();
Assert.assertEquals(expected,webhdfs[i].getFileStatus(p).getLen());
}
// Phase 3: read back byte-by-byte and compare against the written string.
for (int i=0; i < webhdfs.length; i++) {
final FSDataInputStream in=webhdfs[i].open(p);
for (int c, j=0; (c=in.read()) != -1; j++) {
Assert.assertEquals(writeStrings[i].charAt(j),c);
}
in.close();
}
// Phase 4: each client appends a second payload.
for (int i=0; i < webhdfs.length; i++) {
final FSDataOutputStream out=webhdfs[i].append(p);
out.write(appendStrings[i].getBytes());
out.close();
}
// Phase 5: length must now be write-payload plus append-payload.
for (int i=0; i < webhdfs.length; i++) {
final long expected=writeStrings[i].length() + appendStrings[i].length();
Assert.assertEquals(expected,webhdfs[i].getFileStatus(p).getLen());
}
// Phase 6: full read-back must be the concatenation of the two payloads.
for (int i=0; i < webhdfs.length; i++) {
final StringBuilder b=new StringBuilder();
final FSDataInputStream in=webhdfs[i].open(p);
for (int c; (c=in.read()) != -1; ) {
b.append((char)c);
}
final int wlen=writeStrings[i].length();
Assert.assertEquals(writeStrings[i],b.substring(0,wlen));
Assert.assertEquals(appendStrings[i],b.substring(wlen));
in.close();
}
}
APIUtilityVerifier EqualityVerifier
@Test public void testToSortedStringEscapesURICharacters(){
final String sep="&";
Param,?> ampParam=new TokenArgumentParam("token&ersand");
Param,?> equalParam=new RenewerParam("renewer=equal");
final String expected="&renewer=renewer%3Dequal&token=token%26ampersand";
final String actual=Param.toSortedString(sep,equalParam,ampParam);
Assert.assertEquals(expected,actual);
}
APIUtilityVerifier IterativeVerifier EqualityVerifier
/**
 * For source lists of every size from 0 up to the number of candidates,
 * the param value must be the comma-joined path strings.
 */
@Test public void testConcatSourcesParam(){
final String[] strings={"/","/foo","/bar"};
for (int n=0; n < strings.length; n++) {
// Use the first n candidate strings as the concat sources.
final Path[] paths=new Path[n];
final String[] sub=new String[n];
for (int i=0; i < n; i++) {
sub[i]=strings[i];
paths[i]=new Path(sub[i]);
}
final ConcatSourcesParam computed=new ConcatSourcesParam(paths);
final String expected=StringUtils.join(",",Arrays.asList(sub));
Assert.assertEquals(expected,computed.getValue());
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Over HTTPS the auth cookie must be marked both HttpOnly and Secure and
 * still carry the expected value.
 */
@Test public void testHttpsCookie() throws IOException, GeneralSecurityException {
URL base=new URL("https://" + NetUtils.getHostPortString(server.getConnectorAddress(1)));
HttpsURLConnection conn=(HttpsURLConnection)new URL(base,"/echo").openConnection();
conn.setSSLSocketFactory(clientSslFactory.createSSLSocketFactory());
String header=conn.getHeaderField("Set-Cookie");
// Typed list restored for the parsed cookies.
List<HttpCookie> cookies=HttpCookie.parse(header);
Assert.assertFalse(cookies.isEmpty());
// HttpOnly is asserted on the raw header string.
Assert.assertTrue(header.contains("; HttpOnly"));
Assert.assertTrue(cookies.get(0).getSecure());
// assertEquals gives a useful message on failure, unlike assertTrue(equals).
Assert.assertEquals("token",cookies.get(0).getValue());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Over plain HTTP the auth cookie must be marked HttpOnly and carry the
 * expected value (no Secure flag is required here).
 */
@Test public void testHttpCookie() throws IOException {
URL base=new URL("http://" + NetUtils.getHostPortString(server.getConnectorAddress(0)));
HttpURLConnection conn=(HttpURLConnection)new URL(base,"/echo").openConnection();
String header=conn.getHeaderField("Set-Cookie");
// Typed list restored for the parsed cookies.
List<HttpCookie> cookies=HttpCookie.parse(header);
Assert.assertFalse(cookies.isEmpty());
// HttpOnly is asserted on the raw header string.
Assert.assertTrue(header.contains("; HttpOnly"));
// assertEquals gives a useful message on failure, unlike assertTrue(equals).
Assert.assertEquals("token",cookies.get(0).getValue());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* Verify the administrator access for /logs, /stacks, /conf, /logLevel and
* /metrics servlets.
* @throws Exception
*/
@Test public void testAuthorizationOfDefaultServlets() throws Exception {
Configuration conf=new Configuration();
// Enable service authorization and require admin access for the
// instrumentation servlets so the ACL below is actually enforced.
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,true);
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,true);
conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,DummyFilterInitializer.class.getName());
// Use the test-local group mapping so group membership is fully controlled.
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,MyGroupsProvider.class.getName());
Groups.getUserToGroupsMappingService(conf);
MyGroupsProvider.clearMapping();
// userA/userB are admitted by user name, userC/userD via groupC/groupD;
// userE matches neither side of the ACL below and must be rejected.
MyGroupsProvider.mapping.put("userA",Arrays.asList("groupA"));
MyGroupsProvider.mapping.put("userB",Arrays.asList("groupB"));
MyGroupsProvider.mapping.put("userC",Arrays.asList("groupC"));
MyGroupsProvider.mapping.put("userD",Arrays.asList("groupD"));
MyGroupsProvider.mapping.put("userE",Arrays.asList("groupE"));
HttpServer2 myServer=new HttpServer2.Builder().setName("test").addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf).setACL(new AccessControlList("userA,userB groupC,groupD")).build();
myServer.setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE,conf);
myServer.start();
String serverURL="http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
// Every default servlet must allow the four ACL'd users and forbid userE.
for ( String servlet : new String[]{"conf","logs","stacks","logLevel","metrics"}) {
for ( String user : new String[]{"userA","userB","userC","userD"}) {
assertEquals(HttpURLConnection.HTTP_OK,getHttpStatusCode(serverURL + servlet,user));
}
assertEquals(HttpURLConnection.HTTP_FORBIDDEN,getHttpStatusCode(serverURL + servlet,"userE"));
}
myServer.stop();
}
APIUtilityVerifier EqualityVerifier
/**
 * Each request path must return HTTP 200 with the expected Content-Type:
 * static .css files as text/css, the echo servlet as text/plain even when
 * the query ends in ".css", and the HTML servlet as text/html.
 */
@Test public void testContentTypes() throws Exception {
// {request path, expected Content-Type}, checked in the original order.
final String[][] expectations={
{"/static/test.css","text/css"},
{"/echo?a=b","text/plain; charset=utf-8"},
{"/echo?a=b.css","text/plain; charset=utf-8"},
{"/htmlcontent","text/html; charset=utf-8"}};
for ( String[] expectation : expectations) {
HttpURLConnection conn=(HttpURLConnection)new URL(baseUrl,expectation[0]).openConnection();
conn.connect();
assertEquals(200,conn.getResponseCode());
assertEquals(expectation[1],conn.getContentType());
}
}
APIUtilityVerifier EqualityVerifier
/**
 * Test that verifies headers can be up to 64K long. A 63K header value is
 * sent, leaving 1K of the 64K header buffer for all other header names and
 * values (the buffer setting covers ALL headers together).
 */
@Test public void testLongHeader() throws Exception {
final int headerLen=63 * 1024;
// Build the oversized header value.
StringBuilder headerValue=new StringBuilder(headerLen);
while (headerValue.length() < headerLen) {
headerValue.append('a');
}
HttpURLConnection conn=(HttpURLConnection)new URL(baseUrl,"/longheader").openConnection();
conn.setRequestProperty("longheader",headerValue.toString());
assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testBindAddress() throws Exception {
// Ephemeral-port bind must work at all (server is stopped immediately).
checkBindAddress("localhost",0,false).stop();
HttpServer2 myServer=checkBindAddress("localhost",0,false);
HttpServer2 myServer2=null;
try {
int port=myServer.getConnectorAddress(0).getPort();
// Second server asks for the already-bound port with findPort=true, so
// it is expected to probe and settle on some free port instead of failing.
myServer2=checkBindAddress("localhost",port,true);
port=myServer2.getConnectorAddress(0).getPort();
myServer2.stop();
// After stop() the connector address is no longer available...
assertNull(myServer2.getConnectorAddress(0));
// ...but reopening the listeners must rebind to the same port as before.
myServer2.openListeners();
assertEquals(port,myServer2.getConnectorAddress(0).getPort());
}
finally {
myServer.stop();
if (myServer2 != null) {
myServer2.stop();
}
}
}
APIUtilityVerifier EqualityVerifier
/**
 * The Jersey resource must echo both the path component ("foo") and the op
 * query parameter ("bar") back in its JSON response.
 */
@Test public void testJersey() throws Exception {
LOG.info("BEGIN testJersey()");
final String js=readOutput(new URL(baseUrl,"/jersey/foo?op=bar"));
// Typed map restored for the parsed JSON response.
final Map<String, Object> m=parse(js);
LOG.info("m=" + m);
assertEquals("foo",m.get(JerseyResource.PATH));
assertEquals("bar",m.get(JerseyResource.OP));
LOG.info("END testJersey()");
}
APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Responses must carry cache-suppression headers for both HTTP/1.1
 * (Cache-Control) and HTTP/1.0 (Pragma), plus Expires equal to Date.
 */
@Test public void testNoCacheHeader() throws Exception {
HttpURLConnection httpConn=(HttpURLConnection)new URL(baseUrl,"/echo?a=b&c=d").openConnection();
assertEquals(HttpURLConnection.HTTP_OK,httpConn.getResponseCode());
assertEquals("no-cache",httpConn.getHeaderField("Cache-Control"));
assertEquals("no-cache",httpConn.getHeaderField("Pragma"));
String expires=httpConn.getHeaderField("Expires");
String date=httpConn.getHeaderField("Date");
assertNotNull(expires);
assertNotNull(date);
// Expires equal to Date means the response is already expired on arrival.
assertEquals(expires,date);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Writes the int[] field via ObjectWritable's legacy (non-compact) format
 * and verifies the class label, the length, and every element read back.
 * NOTE(review): 'out', 'in' and the int[] field 'i' are fixtures declared
 * elsewhere in this class.
 */
@Test public void testOldFormat() throws IOException {
ObjectWritable.writeObject(out,i,i.getClass(),null);
in.reset(out.getData(),out.getLength());
@SuppressWarnings("deprecation") String className=UTF8.readString(in);
assertEquals("The int[] written by ObjectWritable as a non-compact array " + "was not labelled as an array of int",i.getClass().getName(),className);
int length=in.readInt();
assertEquals("The int[] written by ObjectWritable as a non-compact array " + "was not expected length",i.length,length);
int[] readValue=new int[length];
try {
// Loop index renamed from 'i', which shadowed the int[] field 'i' above.
for (int idx=0; idx < length; idx++) {
readValue[idx]=(int)((Integer)ObjectWritable.readObject(in,null));
}
}
catch ( Exception e) {
fail("The int[] written by ObjectWritable as a non-compact array " + "was corrupted. Failed to correctly read int[] of length " + length + ". Got exception:\n"+ StringUtils.stringifyException(e));
}
assertTrue("The int[] written by ObjectWritable as a non-compact array " + "was corrupted.",Arrays.equals(i,readValue));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test @SuppressWarnings("deprecation") public void testObjectLabeling() throws IOException {
  // Write the int[] twice: once directly (ObjectWritable wraps it in an
  // ArrayPrimitiveWritable.Internal) and once pre-wrapped in an
  // ArrayPrimitiveWritable, then verify the labels and payloads on read-back.
  ObjectWritable.writeObject(out,i,i.getClass(),null,true);
  ArrayPrimitiveWritable apw=new ArrayPrimitiveWritable(i);
  ObjectWritable.writeObject(out,apw,apw.getClass(),null,true);
  in.reset(out.getData(),out.getLength());
  String className=UTF8.readString(in);
  assertEquals("The int[] written by ObjectWritable was not labelled as " + "an ArrayPrimitiveWritable.Internal",ArrayPrimitiveWritable.Internal.class.getName(),className);
  ArrayPrimitiveWritable.Internal apwi=new ArrayPrimitiveWritable.Internal();
  apwi.readFields(in);
  // BUGFIX: the original asserted on apw (the freshly written wrapper, which
  // is trivially correct) instead of apwi, the instance just deserialized.
  assertEquals("The ArrayPrimitiveWritable.Internal component type was corrupted",int.class,apwi.getComponentType());
  assertTrue("The int[] written by ObjectWritable as " + "ArrayPrimitiveWritable.Internal was corrupted",Arrays.equals(i,(int[])(apwi.get())));
  String declaredClassName=UTF8.readString(in);
  assertEquals("The APW written by ObjectWritable was not labelled as " + "declaredClass ArrayPrimitiveWritable",ArrayPrimitiveWritable.class.getName(),declaredClassName);
  className=UTF8.readString(in);
  assertEquals("The APW written by ObjectWritable was not labelled as " + "class ArrayPrimitiveWritable",ArrayPrimitiveWritable.class.getName(),className);
  // Deserialize the explicitly-wrapped copy and verify it round-trips too.
  ArrayPrimitiveWritable apw2=new ArrayPrimitiveWritable();
  apw2.readFields(in);
  assertEquals("The ArrayPrimitiveWritable component type was corrupted",int.class,apw2.getComponentType());
  assertTrue("The int[] written by ObjectWritable as " + "ArrayPrimitiveWritable was corrupted",Arrays.equals(i,(int[])(apw2.get())));
}
APIUtilityVerifier EqualityVerifier
/**
 * test {@code MapFile.Reader.midKey() } method
 */
@Test public void testMidKeyOnCurrentApi() throws Exception {
  final String prefix = "testMidKeyOnCurrentApi.mapfile";
  final int entryCount = 10;
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = createWriter(prefix, IntWritable.class, IntWritable.class);
    for (int k = 0; k < entryCount; k++) {
      writer.append(new IntWritable(k), new IntWritable(k));
    }
    writer.close();
    reader = createReader(prefix, IntWritable.class);
    // With keys 0..9 the midpoint key is (10 - 1) / 2 == 4.
    assertEquals(new IntWritable((entryCount - 1) / 2), reader.midKey());
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * test {@code MapFile.Reader.getClosest()} method
 */
@Test public void testGetClosestOnCurrentApi() throws Exception {
  final String TEST_PREFIX="testGetClosestOnCurrentApi.mapfile";
  MapFile.Writer writer=null;
  MapFile.Reader reader=null;
  try {
    writer=createWriter(TEST_PREFIX,Text.class,Text.class);
    int FIRST_KEY=1;
    // Keys are "1","11","21",...,"91" — note these sort lexicographically.
    for (int i=FIRST_KEY; i < 100; i+=10) {
      Text t=new Text(Integer.toString(i));
      writer.append(t,t);
    }
    writer.close();
    reader=createReader(TEST_PREFIX,Text.class);
    Text key=new Text("55");
    Text value=new Text();
    // Closest at-or-after "55" is "61"; closest at-or-before is "51".
    Text closest=(Text)reader.getClosest(key,value);
    assertEquals(new Text("61"),closest);
    closest=(Text)reader.getClosest(key,value,true);
    assertEquals(new Text("51"),closest);
    // An exact key must resolve to itself.
    final Text explicitKey=new Text("21");
    closest=(Text)reader.getClosest(explicitKey,value);
    // BUGFIX: the original compared explicitKey with itself (vacuously true);
    // the deserialized 'closest' is what must equal "21".
    assertEquals(new Text("21"),closest);
    // Before the first key: forward search finds the first entry.
    key=new Text("00");
    closest=(Text)reader.getClosest(key,value);
    assertEquals(FIRST_KEY,Integer.parseInt(closest.toString()));
    // Past the last key: forward search yields null, backward finds "91".
    key=new Text("92");
    closest=(Text)reader.getClosest(key,value);
    assertNull("Not null key in testGetClosestWithNewCode",closest);
    closest=(Text)reader.getClosest(key,value,true);
    assertEquals(new Text("91"),closest);
  }
  finally {
    IOUtils.cleanup(null,writer,reader);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test @SuppressWarnings("deprecation") public void testMidKey() throws Exception {
  // A single-entry map file: midKey() must return that lone key.
  Path dirName = new Path(TEST_DIR, "testMidKey.mapfile");
  FileSystem fs = FileSystem.getLocal(conf);
  Path qualifiedDirName = fs.makeQualified(dirName);
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = new MapFile.Writer(conf, fs, qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
    writer.append(new IntWritable(1), new IntWritable(1));
    writer.close();
    reader = new MapFile.Reader(qualifiedDirName, conf);
    assertEquals(new IntWritable(1), reader.midKey());
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test getClosest feature.
 * @throws Exception
 */
@Test @SuppressWarnings("deprecation") public void testGetClosest() throws Exception {
  Path dirName = new Path(TEST_DIR, "testGetClosest.mapfile");
  FileSystem fs = FileSystem.getLocal(conf);
  Path qualifiedDirName = fs.makeQualified(dirName);
  // A small index interval forces frequent index entries for the seek tests.
  MapFile.Writer.setIndexInterval(conf, 3);
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = new MapFile.Writer(conf, fs, qualifiedDirName.toString(), Text.class, Text.class);
    assertEquals(3, writer.getIndexInterval());
    final int FIRST_KEY = 10;
    // Zero-padded keys "10","20",...,"90" so lexical order matches numeric.
    for (int k = FIRST_KEY; k < 100; k += 10) {
      String kStr = Integer.toString(k);
      Text t = new Text("00".substring(kStr.length()) + kStr);
      writer.append(t, t);
    }
    writer.close();
    reader = new MapFile.Reader(qualifiedDirName, conf);
    Text probe = new Text("55");
    Text val = new Text();
    // Between entries: forward gives "60", backward gives "50".
    Text found = (Text) reader.getClosest(probe, val);
    assertEquals(new Text("60"), found);
    found = (Text) reader.getClosest(probe, val, true);
    assertEquals(new Text("50"), found);
    // An exact match resolves to itself in either direction.
    final Text TWENTY = new Text("20");
    found = (Text) reader.getClosest(TWENTY, val);
    assertEquals(TWENTY, found);
    found = (Text) reader.getClosest(TWENTY, val, true);
    assertEquals(TWENTY, found);
    // Before the first entry: forward finds the first key, backward finds nothing.
    probe = new Text("00");
    found = (Text) reader.getClosest(probe, val);
    assertEquals(FIRST_KEY, Integer.parseInt(found.toString()));
    found = (Text) reader.getClosest(probe, val, true);
    assertNull(found);
    // Past the last entry: forward finds nothing, backward finds the last key.
    probe = new Text("99");
    found = (Text) reader.getClosest(probe, val);
    assertNull(found);
    found = (Text) reader.getClosest(probe, val, true);
    assertEquals(new Text("90"), found);
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier
/**
 * test all available constructor for {@code MapFile.Writer}
 */
@Test @SuppressWarnings("deprecation") public void testDeprecatedConstructors(){
  String path = new Path(TEST_DIR, "writes.mapfile").toString();
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    FileSystem fs = FileSystem.getLocal(conf);
    // Exercise every deprecated Writer constructor; each must yield a usable writer.
    // (key class, value class, compression)
    writer = new MapFile.Writer(conf, fs, path, IntWritable.class, Text.class, CompressionType.RECORD);
    assertNotNull(writer);
    writer.close();
    // ... plus a progress reporter
    writer = new MapFile.Writer(conf, fs, path, IntWritable.class, Text.class, CompressionType.RECORD, defaultProgressable);
    assertNotNull(writer);
    writer.close();
    // ... plus an explicit codec
    writer = new MapFile.Writer(conf, fs, path, IntWritable.class, Text.class, CompressionType.RECORD, defaultCodec, defaultProgressable);
    assertNotNull(writer);
    writer.close();
    // (comparator, value class) variants
    writer = new MapFile.Writer(conf, fs, path, WritableComparator.get(Text.class), Text.class);
    assertNotNull(writer);
    writer.close();
    writer = new MapFile.Writer(conf, fs, path, WritableComparator.get(Text.class), Text.class, SequenceFile.CompressionType.RECORD);
    assertNotNull(writer);
    writer.close();
    writer = new MapFile.Writer(conf, fs, path, WritableComparator.get(Text.class), Text.class, CompressionType.RECORD, defaultProgressable);
    assertNotNull(writer);
    writer.close();
    // Deprecated Reader constructor must expose the key/value classes.
    reader = new MapFile.Reader(fs, path, WritableComparator.get(IntWritable.class), conf);
    assertNotNull(reader);
    assertNotNull("reader key is null !!!", reader.getKeyClass());
    assertNotNull("reader value in null", reader.getValueClass());
  } catch (IOException e) {
    fail(e.getMessage());
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * test {@code MapFile.Reader.next(key, value)} for iteration.
 */
@Test public void testReaderKeyIteration(){
  final String TEST_METHOD_KEY = "testReaderKeyIteration.mapfile";
  final int SIZE = 10;
  final int ITERATIONS = 5;
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = createWriter(TEST_METHOD_KEY, IntWritable.class, Text.class);
    int start = 0;
    for (int k = 0; k < SIZE; k++) {
      writer.append(new IntWritable(k), new Text("Value:" + k));
    }
    writer.close();
    reader = createReader(TEST_METHOD_KEY, IntWritable.class);
    Writable startValue = new Text("Value:" + start);
    // Iterate the whole file several times; reset() must rewind cleanly.
    for (int pass = 0; pass < ITERATIONS; pass++) {
      IntWritable key = new IntWritable(start);
      Writable value = startValue;
      while (reader.next(key, value)) {
        assertNotNull(key);
        assertNotNull(value);
      }
      reader.reset();
    }
    // seek() succeeds for a present key and fails for one past the end.
    assertTrue("reader seek error !!!", reader.seek(new IntWritable(SIZE / 2)));
    assertFalse("reader seek error !!!", reader.seek(new IntWritable(SIZE * 2)));
  } catch (IOException ex) {
    fail("reader seek error !!!");
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test @SuppressWarnings("deprecation") public void testMidKeyEmpty() throws Exception {
  // An empty map file has no midpoint: midKey() must return null.
  Path dirName=new Path(TEST_DIR,"testMidKeyEmpty.mapfile");
  FileSystem fs=FileSystem.getLocal(conf);
  Path qualifiedDirName=fs.makeQualified(dirName);
  MapFile.Writer writer=new MapFile.Writer(conf,fs,qualifiedDirName.toString(),IntWritable.class,IntWritable.class);
  writer.close();
  MapFile.Reader reader=new MapFile.Reader(qualifiedDirName,conf);
  try {
    // assertNull is the idiomatic form of assertEquals(null, ...) and matches
    // the null checks used elsewhere in this suite.
    assertNull(reader.midKey());
  }
  finally {
    reader.close();
  }
}
APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * test {@code MapFile.Writer.testFix} method
 */
@Test public void testFix(){
  final String INDEX_LESS_MAP_FILE = "testFix.mapfile";
  final int PAIR_SIZE = 20;
  MapFile.Writer writer = null;
  try {
    FileSystem fs = FileSystem.getLocal(conf);
    Path dir = new Path(TEST_DIR, INDEX_LESS_MAP_FILE);
    writer = createWriter(INDEX_LESS_MAP_FILE, IntWritable.class, Text.class);
    for (int k = 0; k < PAIR_SIZE; k++) {
      writer.append(new IntWritable(0), new Text("value"));
    }
    writer.close();
    // Remove the index so MapFile.fix has something to rebuild.
    // NOTE(review): this path is built relative to "." rather than TEST_DIR —
    // if it does not resolve, the assertion below is silently skipped; confirm.
    File indexFile = new File(".", "." + INDEX_LESS_MAP_FILE + "/index");
    boolean indexRemoved = indexFile.exists() && indexFile.delete();
    if (indexRemoved) {
      // fix() must report one rebuilt entry per appended pair.
      assertTrue("testFix error !!!", MapFile.fix(fs, dir, IntWritable.class, Text.class, true, conf) == PAIR_SIZE);
    }
  } catch (Exception ex) {
    fail("testFix error !!!");
  } finally {
    IOUtils.cleanup(null, writer);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testGzipCodecRead() throws IOException {
  // Force the pure-Java zlib implementation and verify the factory complies.
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  assertFalse("ZlibFactory is using native libs against request", ZlibFactory.isNativeZlibLoaded(conf));
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator", zlibDecompressor instanceof BuiltInZlibInflater);
  CodecPool.returnDecompressor(zlibDecompressor);
  // Write a gzip file with the JDK's GZIPOutputStream...
  String tmpDir = System.getProperty("test.build.data", "/tmp/");
  Path f = new Path(new Path(tmpDir), "testGzipCodecRead.txt.gz");
  final String msg = "This is the message in the file!";
  BufferedWriter gzipWriter = new BufferedWriter(new OutputStreamWriter(new GZIPOutputStream(new FileOutputStream(f.toString()))));
  gzipWriter.write(msg);
  gzipWriter.close();
  // ...then read it back through the Hadoop codec resolved from the extension.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(f);
  Decompressor decompressor = CodecPool.getDecompressor(codec);
  FileSystem fs = FileSystem.getLocal(conf);
  InputStream is = codec.createInputStream(fs.open(f), decompressor);
  BufferedReader br = new BufferedReader(new InputStreamReader(is));
  assertEquals("Didn't get the same message back!", msg, br.readLine());
  br.close();
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testGzipCompatibility() throws IOException {
  // Compress random data with the JDK gzip stream and verify Hadoop's
  // pure-Java gzip decompressor reproduces it byte for byte.
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  LOG.info("seed: " + seed);
  byte[] original = new byte[r.nextInt(128 * 1024 + 1)];
  r.nextBytes(original);
  DataOutputBuffer dflbuf = new DataOutputBuffer();
  GZIPOutputStream gzout = new GZIPOutputStream(dflbuf);
  gzout.write(original);
  gzout.close();
  DataInputBuffer gzbuf = new DataInputBuffer();
  gzbuf.reset(dflbuf.getData(), dflbuf.getLength());
  // Disable native zlib so the built-in Java decompressor is selected.
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  Decompressor decom = codec.createDecompressor();
  assertNotNull(decom);
  assertEquals(BuiltInGzipDecompressor.class, decom.getClass());
  InputStream gzin = codec.createInputStream(gzbuf, decom);
  dflbuf.reset();
  IOUtils.copyBytes(gzin, dflbuf, 4096);
  final byte[] roundTripped = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
  assertArrayEquals(original, roundTripped);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testCodecPoolGzipReuse() throws Exception {
  // A pooled zlib compressor returned on behalf of DefaultCodec must not be
  // handed back out for GzipCodec; requires native zlib to be meaningful.
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (!ZlibFactory.isNativeZlibLoaded(conf)) {
    LOG.warn("testCodecPoolGzipReuse skipped: native libs not loaded");
    return;
  }
  GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf);
  DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf);
  Compressor c1 = CodecPool.getCompressor(gzc);
  Compressor c2 = CodecPool.getCompressor(dfc);
  CodecPool.returnCompressor(c1);
  CodecPool.returnCompressor(c2);
  assertTrue("Got mismatched ZlibCompressor", c2 != CodecPool.getCompressor(gzc));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testGzipLongOverflow() throws IOException {
  // Stream more than 4GB of zeros through the built-in gzip decompressor to
  // exercise counters that would overflow a 32-bit length.
  LOG.info("testGzipLongOverflow");
  Configuration conf=new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY,false);
  assertFalse("ZlibFactory is using native libs against request",ZlibFactory.isNativeZlibLoaded(conf));
  Decompressor zlibDecompressor=ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!",zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",zlibDecompressor instanceof BuiltInZlibInflater);
  CodecPool.returnDecompressor(zlibDecompressor);
  String tmpDir=System.getProperty("test.build.data","/tmp/");
  Path f=new Path(new Path(tmpDir),"testGzipLongOverflow.bin.gz");
  BufferedWriter bw=new BufferedWriter(new OutputStreamWriter(new GZIPOutputStream(new FileOutputStream(f.toString()))));
  final int NBUF=1024 * 4 + 1;
  final char[] buf=new char[1024 * 1024];
  for (int i=0; i < buf.length; i++) buf[i]='\0';
  for (int i=0; i < NBUF; i++) {
    bw.write(buf);
  }
  bw.close();
  CompressionCodecFactory ccf=new CompressionCodecFactory(conf);
  CompressionCodec codec=ccf.getCodec(f);
  Decompressor decompressor=CodecPool.getDecompressor(codec);
  FileSystem fs=FileSystem.getLocal(conf);
  InputStream is=fs.open(f);
  is=codec.createInputStream(is,decompressor);
  BufferedReader br=new BufferedReader(new InputStreamReader(is));
  for (int j=0; j < NBUF; j++) {
    int n=br.read(buf);
    // BUGFIX: JUnit's assertEquals takes (message, expected, actual); the
    // original passed the actual value first, producing misleading failures.
    assertEquals("got wrong read length!",buf.length,n);
    for (int i=0; i < buf.length; i++) assertEquals("got wrong byte!",'\0',buf[i]);
  }
  br.close();
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testCompressDecompress(){
  // Round-trip a random buffer through Lz4Compressor/Lz4Decompressor and
  // check the byte counters along the way.
  int BYTE_SIZE=1024 * 54;
  byte[] bytes=generate(BYTE_SIZE);
  Lz4Compressor compressor=new Lz4Compressor();
  try {
    compressor.setInput(bytes,0,bytes.length);
    assertTrue("Lz4CompressDecompress getBytesRead error !!!",compressor.getBytesRead() > 0);
    // assertEquals gives the actual counter value on failure, unlike assertTrue(x == 0).
    assertEquals("Lz4CompressDecompress getBytesWritten before compress error !!!",0,compressor.getBytesWritten());
    byte[] compressed=new byte[BYTE_SIZE];
    int cSize=compressor.compress(compressed,0,compressed.length);
    assertTrue("Lz4CompressDecompress getBytesWritten after compress error !!!",compressor.getBytesWritten() > 0);
    Lz4Decompressor decompressor=new Lz4Decompressor();
    decompressor.setInput(compressed,0,cSize);
    byte[] decompressed=new byte[BYTE_SIZE];
    decompressor.decompress(decompressed,0,decompressed.length);
    assertTrue("testLz4CompressDecompress finished error !!!",decompressor.finished());
    assertArrayEquals(bytes,decompressed);
    compressor.reset();
    decompressor.reset();
    assertEquals("decompressor getRemaining error !!!",0,decompressor.getRemaining());
  }
  catch ( Exception e) {
    // BUGFIX: include the exception — the original fail() discarded it,
    // making failures undiagnosable.
    fail("testLz4CompressDecompress ex error!!! " + e);
  }
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
@Test public void testCompressorDecopressorLogicWithCompressionStreams(){
  // Round-trip a buffer through the block compressor/decompressor stream pair.
  DataOutputStream deflateOut=null;
  DataInputStream inflateIn=null;
  int BYTE_SIZE=1024 * 100;
  byte[] bytes=generate(BYTE_SIZE);
  int bufferSize=262144;
  int compressionOverhead=(bufferSize / 6) + 32;
  try {
    DataOutputBuffer compressedDataBuffer=new DataOutputBuffer();
    CompressionOutputStream deflateFilter=new BlockCompressorStream(compressedDataBuffer,new Lz4Compressor(bufferSize),bufferSize,compressionOverhead);
    deflateOut=new DataOutputStream(new BufferedOutputStream(deflateFilter));
    deflateOut.write(bytes,0,bytes.length);
    deflateOut.flush();
    deflateFilter.finish();
    DataInputBuffer deCompressedDataBuffer=new DataInputBuffer();
    deCompressedDataBuffer.reset(compressedDataBuffer.getData(),0,compressedDataBuffer.getLength());
    CompressionInputStream inflateFilter=new BlockDecompressorStream(deCompressedDataBuffer,new Lz4Decompressor(bufferSize),bufferSize);
    inflateIn=new DataInputStream(new BufferedInputStream(inflateFilter));
    byte[] result=new byte[BYTE_SIZE];
    // BUGFIX: read() may return fewer bytes than requested; readFully()
    // guarantees the whole buffer is filled, so the comparison is reliable.
    inflateIn.readFully(result);
    assertArrayEquals("original array not equals compress/decompressed array",result,bytes);
  }
  catch ( IOException e) {
    fail("testLz4CompressorDecopressorLogicWithCompressionStreams ex error !!!");
  }
  finally {
    try {
      if (deflateOut != null) deflateOut.close();
      if (inflateIn != null) inflateIn.close();
    }
    catch ( Exception e) {
    }
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier HybridVerifier
@Test public void testSetInputWithBytesSizeMoreThenDefaultLz4CompressorByfferSize(){
  // Input one byte larger than the compressor's default 64K buffer:
  // setInput/compress must still produce output.
  final int BYTES_SIZE = 1024 * 64 + 1;
  try {
    Lz4Compressor compressor = new Lz4Compressor();
    byte[] input = generate(BYTES_SIZE);
    assertTrue("needsInput error !!!", compressor.needsInput());
    compressor.setInput(input, 0, input.length);
    byte[] out = new byte[BYTES_SIZE];
    int produced = compressor.compress(out, 0, input.length);
    assertTrue("testSetInputWithBytesSizeMoreThenDefaultLz4CompressorByfferSize error !!!", produced != 0);
  } catch (Exception ex) {
    fail("testSetInputWithBytesSizeMoreThenDefaultLz4CompressorByfferSize ex error !!!");
  }
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
@Test public void testCompressorDecompressorEmptyStreamLogic(){
  // An empty LZ4 block stream compresses to exactly 4 bytes (the length
  // header) and decompresses straight to EOF.
  ByteArrayInputStream bytesIn = null;
  ByteArrayOutputStream bytesOut = null;
  BlockDecompressorStream blockDecompressorStream = null;
  try {
    bytesOut = new ByteArrayOutputStream();
    BlockCompressorStream blockCompressorStream = new BlockCompressorStream(bytesOut, new Lz4Compressor(), 1024, 0);
    blockCompressorStream.close();
    byte[] buf = bytesOut.toByteArray();
    assertEquals("empty stream compressed output size != 4", 4, buf.length);
    bytesIn = new ByteArrayInputStream(buf);
    blockDecompressorStream = new BlockDecompressorStream(bytesIn, new Lz4Decompressor(), 1024);
    assertEquals("return value is not -1", -1, blockDecompressorStream.read());
  } catch (Exception e) {
    fail("testCompressorDecompressorEmptyStreamLogic ex error !!!" + e.getMessage());
  } finally {
    if (blockDecompressorStream != null) {
      try {
        bytesIn.close();
        bytesOut.close();
        blockDecompressorStream.close();
      } catch (IOException e) {
      }
    }
  }
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
@Test public void testCompressorDecompressorEmptyStreamLogic(){
  // Snappy analogue of the LZ4 empty-stream test: closing an empty block
  // compressor stream emits only the 4-byte length header, and reading the
  // result yields immediate EOF.
  ByteArrayInputStream compressedIn = null;
  ByteArrayOutputStream compressedOut = null;
  BlockDecompressorStream decompressorStream = null;
  try {
    compressedOut = new ByteArrayOutputStream();
    BlockCompressorStream compressorStream = new BlockCompressorStream(compressedOut, new SnappyCompressor(), 1024, 0);
    compressorStream.close();
    byte[] header = compressedOut.toByteArray();
    assertEquals("empty stream compressed output size != 4", 4, header.length);
    compressedIn = new ByteArrayInputStream(header);
    decompressorStream = new BlockDecompressorStream(compressedIn, new SnappyDecompressor(), 1024);
    assertEquals("return value is not -1", -1, decompressorStream.read());
  } catch (Exception e) {
    fail("testCompressorDecompressorEmptyStreamLogic ex error !!!" + e.getMessage());
  } finally {
    if (decompressorStream != null) {
      try {
        compressedIn.close();
        compressedOut.close();
        decompressorStream.close();
      } catch (IOException e) {
      }
    }
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testZlibCompressDecompress(){
  // Round-trip a 64K random buffer through ZlibCompressor/ZlibDecompressor.
  int rawDataSize=1024 * 64;
  byte[] rawData=generate(rawDataSize);
  try {
    ZlibCompressor compressor=new ZlibCompressor();
    ZlibDecompressor decompressor=new ZlibDecompressor();
    assertFalse("testZlibCompressDecompress finished error",compressor.finished());
    compressor.setInput(rawData,0,rawData.length);
    // Nothing consumed until compress() runs.
    assertEquals("testZlibCompressDecompress getBytesRead before error",0,compressor.getBytesRead());
    compressor.finish();
    byte[] compressedResult=new byte[rawDataSize];
    int cSize=compressor.compress(compressedResult,0,rawDataSize);
    // BUGFIX: corrected message typos ("ather" -> "after", "no less then" ->
    // "not less than") and used assertEquals so failures show actual values.
    assertEquals("testZlibCompressDecompress getBytesRead after error",rawDataSize,compressor.getBytesRead());
    assertTrue("testZlibCompressDecompress compressed size not less than original size",cSize < rawDataSize);
    decompressor.setInput(compressedResult,0,cSize);
    byte[] decompressedBytes=new byte[rawDataSize];
    decompressor.decompress(decompressedBytes,0,decompressedBytes.length);
    assertArrayEquals("testZlibCompressDecompress arrays not equals ",rawData,decompressedBytes);
    compressor.reset();
    decompressor.reset();
  }
  catch ( IOException ex) {
    fail("testZlibCompressDecompress ex !!!" + ex);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testLocate() throws IOException {
  if (skip) return;
  // Write three blocks' worth of records so locates cross block boundaries.
  writeRecords(3 * records1stBlock);
  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner = reader.createScanner();
  // Keys inside the file, including both sides of the first block boundary,
  // must all be locatable.
  locate(scanner, composeSortedKey(KEY, 2).getBytes());
  locate(scanner, composeSortedKey(KEY, records1stBlock - 1).getBytes());
  locate(scanner, composeSortedKey(KEY, records1stBlock).getBytes());
  // A key beyond every record resolves to the scanner's end location.
  Location pastEnd = locate(scanner, "keyX".getBytes());
  Assert.assertEquals(scanner.endLocation, pastEnd);
  scanner.close();
  reader.close();
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test for races in fstat usage
 * NOTE: this test is likely to fail on RHEL 6.0 which has a non-threadsafe
 * implementation of getpwuid_r.
 */
@Test(timeout=30000) public void testMultiThreadedFstat() throws Exception {
  if (Path.WINDOWS) {
    return; // fstat is POSIX-only
  }
  final FileOutputStream fos=new FileOutputStream(new File(TEST_DIR,"testfstat"));
  // BUGFIX: the raw AtomicReference/ArrayList were replaced with proper
  // generics — the enhanced-for over a raw List does not type-check against
  // Thread, and raw types defeat compile-time safety throughout.
  final AtomicReference<Throwable> thrown=new AtomicReference<Throwable>();
  List<Thread> statters=new ArrayList<Thread>();
  for (int i=0; i < 10; i++) {
    // Each worker hammers getFstat() on the shared fd for ~5 seconds and
    // records the first failure it observes.
    Thread statter=new Thread(){
      @Override public void run(){
        long et=Time.now() + 5000;
        while (Time.now() < et) {
          try {
            NativeIO.POSIX.Stat stat=NativeIO.POSIX.getFstat(fos.getFD());
            assertEquals(System.getProperty("user.name"),stat.getOwner());
            assertNotNull(stat.getGroup());
            assertTrue(!stat.getGroup().isEmpty());
            assertEquals("Stat mode field should indicate a regular file",NativeIO.POSIX.Stat.S_IFREG,stat.getMode() & NativeIO.POSIX.Stat.S_IFMT);
          }
          catch ( Throwable t) {
            thrown.set(t);
          }
        }
      }
    };
    statters.add(statter);
    statter.start();
  }
  for ( Thread t : statters) {
    t.join();
  }
  fos.close();
  // Surface any failure captured inside the worker threads.
  if (thrown.get() != null) {
    throw new RuntimeException(thrown.get());
  }
}
APIUtilityVerifier AssumptionSetter EqualityVerifier HybridVerifier
@Test(timeout=10000) public void testMlock() throws Exception {
  assumeTrue(NativeIO.isAvailable());
  final File TEST_FILE = new File(new File(System.getProperty("test.build.data", "build/test/data")), "testMlockFile");
  final int BUF_LEN = 12289;
  // Fill a buffer with a known pattern and remember its checksum.
  byte[] buf = new byte[BUF_LEN];
  int expectedSum = 0;
  for (int i = 0; i < buf.length; i++) {
    buf[i] = (byte) (i % 60);
    expectedSum += buf[i];
  }
  FileOutputStream fos = new FileOutputStream(TEST_FILE);
  try {
    fos.write(buf);
    fos.getChannel().force(true);
  } finally {
    fos.close();
  }
  FileInputStream fis = null;
  FileChannel channel = null;
  try {
    fis = new FileInputStream(TEST_FILE);
    channel = fis.getChannel();
    long fileSize = channel.size();
    MappedByteBuffer mapped = channel.map(MapMode.READ_ONLY, 0, fileSize);
    // mlock the mapping, then verify the locked pages still hold the data.
    NativeIO.POSIX.mlock(mapped, fileSize);
    int actualSum = 0;
    for (int i = 0; i < fileSize; i++) {
      actualSum += mapped.get(i);
    }
    assertEquals("Expected sums to be equal", expectedSum, actualSum);
    NativeIO.POSIX.munmap(mapped);
  } finally {
    if (channel != null) {
      channel.close();
    }
    if (fis != null) {
      fis.close();
    }
  }
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
@Test(timeout=30000) public void testSetFilePointer() throws Exception {
  if (!Path.WINDOWS) {
    return; // exercises a Windows-only native API
  }
  LOG.info("Set a file pointer on Windows");
  try {
    File testfile = new File(TEST_DIR, "testSetFilePointer");
    assertTrue("Create test subject", testfile.exists() || testfile.createNewFile());
    // Write 100 'a's followed by 100 'b's.
    FileWriter writer = new FileWriter(testfile);
    try {
      for (int i = 0; i < 200; i++) {
        writer.write(i < 100 ? 'a' : 'b');
      }
      writer.flush();
    } catch (Exception writerException) {
      fail("Got unexpected exception: " + writerException.getMessage());
    } finally {
      writer.close();
    }
    FileDescriptor fd = NativeIO.Windows.createFile(testfile.getCanonicalPath(), NativeIO.Windows.GENERIC_READ, NativeIO.Windows.FILE_SHARE_READ | NativeIO.Windows.FILE_SHARE_WRITE | NativeIO.Windows.FILE_SHARE_DELETE, NativeIO.Windows.OPEN_EXISTING);
    // Seek past the 'a' region; the next character read must be a 'b'.
    NativeIO.Windows.setFilePointer(fd, 120, NativeIO.Windows.FILE_BEGIN);
    FileReader reader = new FileReader(fd);
    try {
      int c = reader.read();
      assertTrue("Unexpected character: " + c, c == 'b');
    } catch (Exception readerException) {
      fail("Got unexpected exception: " + readerException.getMessage());
    } finally {
      reader.close();
    }
  } catch (Exception e) {
    fail("Got unexpected exception: " + e.getMessage());
  }
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
@Test(timeout=30000) public void testCreateFile() throws Exception {
  if (!Path.WINDOWS) {
    return; // exercises a Windows-only native API
  }
  LOG.info("Open a file on Windows with SHARE_DELETE shared mode");
  try {
    File testfile = new File(TEST_DIR, "testCreateFile");
    assertTrue("Create test subject", testfile.exists() || testfile.createNewFile());
    FileDescriptor fd = NativeIO.Windows.createFile(testfile.getCanonicalPath(), NativeIO.Windows.GENERIC_READ, NativeIO.Windows.FILE_SHARE_READ | NativeIO.Windows.FILE_SHARE_WRITE | NativeIO.Windows.FILE_SHARE_DELETE, NativeIO.Windows.OPEN_EXISTING);
    FileInputStream fin = new FileInputStream(fd);
    try {
      fin.read();
      // With SHARE_DELETE the file can be renamed while open, and reads on
      // the already-open descriptor must keep working afterwards.
      File newfile = new File(TEST_DIR, "testRenamedFile");
      assertTrue("Rename failed.", testfile.renameTo(newfile));
      fin.read();
    } catch (Exception e) {
      fail("Got unexpected exception: " + e.getMessage());
    } finally {
      fin.close();
    }
  } catch (Exception e) {
    fail("Got unexpected exception: " + e.getMessage());
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test(timeout=30000) public void testFstat() throws Exception {
  // fstat an open descriptor and sanity-check owner, group and mode bits.
  FileOutputStream fos = new FileOutputStream(new File(TEST_DIR, "testfstat"));
  NativeIO.POSIX.Stat stat = NativeIO.POSIX.getFstat(fos.getFD());
  fos.close();
  LOG.info("Stat: " + String.valueOf(stat));
  String owner = stat.getOwner();
  String expectedOwner = System.getProperty("user.name");
  if (Path.WINDOWS) {
    // On Windows, files created by a member of Administrators are reported
    // as owned by the Administrators group rather than the user.
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(expectedOwner);
    final String adminsGroupString = "Administrators";
    if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) {
      expectedOwner = adminsGroupString;
    }
  }
  assertEquals(expectedOwner, owner);
  assertNotNull(stat.getGroup());
  assertTrue(!stat.getGroup().isEmpty());
  assertEquals("Stat mode field should indicate a regular file", NativeIO.POSIX.Stat.S_IFREG, stat.getMode() & NativeIO.POSIX.Stat.S_IFMT);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test(timeout=10000) public void testReadAndWrite() throws Exception {
  // A byte written through an output stream wrapping the shared descriptor
  // must be visible to a read through the original input stream.
  File path=new File(TEST_BASE,"testReadAndWrite");
  path.mkdirs();
  SharedFileDescriptorFactory factory=SharedFileDescriptorFactory.create("woot_",new String[]{path.getAbsolutePath()});
  FileInputStream inStream=factory.createDescriptor("testReadAndWrite",4096);
  try {
    // Both streams share one fd; the write advances it, so rewind before reading.
    FileOutputStream outStream=new FileOutputStream(inStream.getFD());
    try {
      outStream.write(101);
      inStream.getChannel().position(0);
      Assert.assertEquals(101,inStream.read());
    }
    finally {
      outStream.close();
    }
  }
  finally {
    // BUGFIX: the original leaked both streams and the directory if any call
    // above threw; close and clean up unconditionally.
    inStream.close();
    FileUtil.fullyDelete(path);
  }
}
APIUtilityVerifier BooleanVerifier AssumptionSetter HybridVerifier
@Test(timeout=10000) public void testCleanupRemainders() throws Exception {
  Assume.assumeTrue(NativeIO.isAvailable());
  Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
  File path = new File(TEST_BASE, "testCleanupRemainders");
  path.mkdirs();
  // Plant two stale files with the factory's prefix; creating the factory
  // must sweep them away.
  String remainder1 = path.getAbsolutePath() + Path.SEPARATOR + "woot2_remainder1";
  String remainder2 = path.getAbsolutePath() + Path.SEPARATOR + "woot2_remainder2";
  createTempFile(remainder1);
  createTempFile(remainder2);
  SharedFileDescriptorFactory.create("woot2_", new String[]{path.getAbsolutePath()});
  Assert.assertFalse(new File(remainder1).exists());
  Assert.assertFalse(new File(remainder2).exists());
  FileUtil.fullyDelete(path);
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
@Test(timeout=60000) public void testDirectoryFallbacks() throws Exception {
  File nonExistentPath = new File(TEST_BASE, "nonexistent");
  File permissionDeniedPath = new File("/");
  File goodPath = new File(TEST_BASE, "testDirectoryFallbacks");
  goodPath.mkdirs();
  // With only unusable directories, creation must fail.
  try {
    SharedFileDescriptorFactory.create("shm_", new String[]{nonExistentPath.getAbsolutePath(), permissionDeniedPath.getAbsolutePath()});
    Assert.fail();
  } catch (IOException e) {
    // expected: no usable directory was offered
  }
  // With a usable directory appended, the factory falls through to it.
  SharedFileDescriptorFactory factory = SharedFileDescriptorFactory.create("shm_", new String[]{nonExistentPath.getAbsolutePath(), permissionDeniedPath.getAbsolutePath(), goodPath.getAbsolutePath()});
  Assert.assertEquals(goodPath.getAbsolutePath(), factory.getPath());
  FileUtil.fullyDelete(goodPath);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that concurrent failed method invocations only result in a single
 * failover.
 */
@Test public void testConcurrentMethodFailures() throws InterruptedException {
  FlipFlopProxyProvider proxyProvider = new FlipFlopProxyProvider(UnreliableInterface.class, new SynchronizedUnreliableImplementation("impl1", TypeOfExceptionToFailWith.STANDBY_EXCEPTION, 2), new UnreliableImplementation("impl2", TypeOfExceptionToFailWith.STANDBY_EXCEPTION));
  final UnreliableInterface unreliable = (UnreliableInterface) RetryProxy.create(UnreliableInterface.class, proxyProvider, RetryPolicies.failoverOnNetworkException(10));
  // Two threads hit the failing proxy at the same time...
  ConcurrentMethodThread t1 = new ConcurrentMethodThread(unreliable);
  ConcurrentMethodThread t2 = new ConcurrentMethodThread(unreliable);
  t1.start();
  t2.start();
  t1.join();
  t2.join();
  // ...both end up on impl2, but the provider failed over only once.
  assertEquals("impl2", t1.result);
  assertEquals("impl2", t2.result);
  assertEquals(1, proxyProvider.getFailoversOccurred());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Ensure that when all configured services are throwing StandbyException
 * that we fail over back and forth between them until one is no longer
 * throwing StandbyException.
 */
@Test public void testFailoverBetweenMultipleStandbys() throws UnreliableException, StandbyException, IOException {
  final long millisToSleep = 10000;
  final UnreliableImplementation impl1 = new UnreliableImplementation("impl1", TypeOfExceptionToFailWith.STANDBY_EXCEPTION);
  FlipFlopProxyProvider proxyProvider = new FlipFlopProxyProvider(UnreliableInterface.class, impl1, new UnreliableImplementation("impl2", TypeOfExceptionToFailWith.STANDBY_EXCEPTION));
  final UnreliableInterface unreliable = (UnreliableInterface) RetryProxy.create(UnreliableInterface.class, proxyProvider, RetryPolicies.failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL, 10, 1000, 10000));
  // After a delay, impl1 stops being a standby (its identifier changes);
  // the retrying call below must eventually land on it and succeed.
  new Thread(){
    @Override public void run(){
      ThreadUtil.sleepAtLeastIgnoreInterrupts(millisToSleep);
      impl1.setIdentifier("renamed-impl1");
    }
  }.start();
  String result = unreliable.failsIfIdentifierDoesntMatch("renamed-impl1");
  assertEquals("renamed-impl1", result);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test for {@link RetryInvocationHandler#isRpcInvocation(Object)}
 */
@Test public void testRpcInvocation() throws Exception {
  // A proxy created through RetryProxy counts as an RPC invocation.
  final UnreliableInterface unreliable = (UnreliableInterface)
      RetryProxy.create(UnreliableInterface.class, unreliableImpl, RETRY_FOREVER);
  assertTrue(RetryInvocationHandler.isRpcInvocation(unreliable));

  // A ProtocolTranslator is unwrapped to its underlying proxy object;
  // count how many times the unwrap callback is invoked.
  ProtocolTranslator xlator = new ProtocolTranslator() {
    int count = 0;
    @Override public Object getUnderlyingProxyObject() {
      count++;
      return unreliable;
    }
    @Override public String toString() {
      return "" + count;
    }
  };
  assertTrue(RetryInvocationHandler.isRpcInvocation(xlator));
  // Fixed argument order: expected value first, per the JUnit convention,
  // so a failure message reads correctly.
  assertEquals("1", xlator.toString());
  // An arbitrary object is not an RPC invocation.
  assertFalse(RetryInvocationHandler.isRpcInvocation(new Object()));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests that a thread blocked inside the retry proxy's sleep can be
 * interrupted, and that the interruption surfaces to the caller as an
 * InterruptedException (wrapped in UndeclaredThrowableException).
 */
@Test public void testRetryInterruptible() throws Throwable {
  final UnreliableInterface unreliable = (UnreliableInterface) RetryProxy.create(
      UnreliableInterface.class, unreliableImpl,
      retryUpToMaximumTimeWithFixedSleep(10, 10, TimeUnit.SECONDS));
  final CountDownLatch latch = new CountDownLatch(1);
  // Fixed: proper generics instead of raw AtomicReference/Future/Callable.
  final AtomicReference<Thread> futureThread = new AtomicReference<Thread>();
  ExecutorService exec = Executors.newSingleThreadExecutor();
  try {
    Future<Throwable> future = exec.submit(new Callable<Throwable>() {
      @Override public Throwable call() throws Exception {
        futureThread.set(Thread.currentThread());
        latch.countDown();
        try {
          // Fails, then blocks in the retry policy's fixed sleep.
          unreliable.alwaysFailsWithFatalException();
        } catch (UndeclaredThrowableException ute) {
          return ute.getCause();
        }
        return null;
      }
    });
    latch.await();
    Thread.sleep(1000); // give the task time to enter its retry sleep
    assertTrue(futureThread.get().isAlive());
    futureThread.get().interrupt();
    Throwable e = future.get(1, TimeUnit.SECONDS);
    assertNotNull(e);
    assertEquals(InterruptedException.class, e.getClass());
    assertEquals("sleep interrupted", e.getMessage());
  } finally {
    // Fixed: shut the executor down so its worker thread is not leaked.
    exec.shutdownNow();
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testWritableConfigurable() throws Exception {
  conf.set(CONF_TEST_KEY, CONF_TEST_VALUE);

  // Wrap a Baz in a configured GenericWritable.
  FooGenericWritable wrapper = new FooGenericWritable();
  wrapper.setConf(conf);
  Baz original = new Baz();
  wrapper.set(original);

  // The Baz must survive a serialization round trip and come back with a
  // configuration attached.
  Baz roundTripped = SerializationTestUtil.testSerialization(conf, original);
  assertEquals(original, roundTripped);
  assertNotNull(roundTripped.getConf());
}
APIUtilityVerifier EqualityVerifier
@Test public void testWritableSerialization() throws Exception {
  // A Text instance must survive a serialize/deserialize round trip intact.
  Text original = new Text("test writable");
  Text roundTripped = SerializationTestUtil.testSerialization(conf, original);
  assertEquals(original, roundTripped);
}
APIUtilityVerifier UtilityVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Calling a port nothing listens on must fail with an IOException that
 * names the remote address and carries the connect failure as its cause.
 */
@Test(timeout=60000) public void testStandAloneClient() throws IOException {
  Client client = new Client(LongWritable.class, conf);
  // Port 10 on loopback: nothing is listening there.
  InetSocketAddress address = new InetSocketAddress("127.0.0.1", 10);
  try {
    client.call(new LongWritable(RANDOM.nextLong()), address, null, null, 0, conf);
    fail("Expected an exception to have been thrown");
  } catch (IOException e) {
    String message = e.getMessage();
    String addressText = address.getHostName() + ":" + address.getPort();
    assertTrue("Did not find " + addressText + " in " + message,
        message.contains(addressText));
    Throwable cause = e.getCause();
    assertNotNull("No nested exception in " + e, cause);
    String causeText = cause.getMessage();
    assertTrue("Did not find " + causeText + " in " + message,
        message.contains(causeText));
  } finally {
    // Fixed: stop the client so its threads are not leaked across tests.
    client.stop();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test if the rpc server gets the retry count from client.
 */
@Test(timeout=60000) public void testCallRetryCount() throws IOException {
  final int retryCount = 255;
  final Client client = new Client(LongWritable.class, conf);
  // Fixed: use the named constant rather than repeating the literal 255,
  // so the value sent and the value asserted below cannot drift apart.
  Client.setCallIdAndRetryCount(Client.nextCallId(), retryCount);
  final TestServer server = new TestServer(1, false);
  server.callListener = new Runnable() {
    @Override public void run() {
      // The server-side handler must observe the client's retry count.
      Assert.assertEquals(retryCount, Server.getCallRetryCount());
    }
  };
  try {
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    final SerialCaller caller = new SerialCaller(client, addr, 10);
    caller.run();
    assertFalse(caller.failed);
  } finally {
    client.stop();
    server.stop();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test if
 * (1) the rpc server uses the call id/retry provided by the rpc client, and
 * (2) the rpc client receives the same call id/retry from the rpc server.
 */
@Test(timeout=60000) public void testCallIdAndRetry() throws IOException {
// Shared holder: records the id/retry of each call as the client creates it.
final CallInfo info=new CallInfo();
final Client client=new Client(LongWritable.class,conf){
// Capture the id/retry assigned to every outgoing call.
@Override Call createCall( RpcKind rpcKind, Writable rpcRequest){
final Call call=super.createCall(rpcKind,rpcRequest);
info.id=call.id;
info.retry=call.retry;
return call;
}
// The response header must echo back exactly the id/retry that was sent.
@Override void checkResponse( RpcResponseHeaderProto header) throws IOException {
super.checkResponse(header);
Assert.assertEquals(info.id,header.getCallId());
Assert.assertEquals(info.retry,header.getRetryCount());
}
}
;
final TestServer server=new TestServer(1,false);
// Server side: the handler thread must observe the client-provided values.
server.callListener=new Runnable(){
@Override public void run(){
Assert.assertEquals(info.id,Server.getCallId());
Assert.assertEquals(info.retry,Server.getCallRetryCount());
}
}
;
try {
InetSocketAddress addr=NetUtils.getConnectAddress(server);
server.start();
final SerialCaller caller=new SerialCaller(client,addr,10);
caller.run();
// SerialCaller records assertion failures from the listener in 'failed'.
assertFalse(caller.failed);
}
finally {
client.stop();
server.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test if the rpc server gets the default retry count (0) from client.
 */
@Test(timeout=60000) public void testInitialCallRetryCount() throws IOException {
  final Client client = new Client(LongWritable.class, conf);
  final TestServer server = new TestServer(1, false);
  // No retry count is set explicitly, so the server must observe zero.
  server.callListener = new Runnable() {
    @Override public void run() {
      Assert.assertEquals(0, Server.getCallRetryCount());
    }
  };
  try {
    InetSocketAddress serverAddr = NetUtils.getConnectAddress(server);
    server.start();
    SerialCaller serialCaller = new SerialCaller(client, serverAddr, 10);
    serialCaller.run();
    assertFalse(serialCaller.failed);
  } finally {
    client.stop();
    server.stop();
  }
}
APIUtilityVerifier IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests that client generates a unique sequential call ID for each RPC call,
 * even if multiple threads are using the same client.
 * @throws InterruptedException
 */
@Test(timeout=60000) public void testUniqueSequentialCallIds()
    throws IOException, InterruptedException {
  int serverThreads = 10, callerCount = 100, perCallerCallCount = 100;
  TestServer server = new TestServer(serverThreads, false);
  // Fixed: typed list instead of raw types. Synchronized because server
  // handler threads append concurrently.
  final List<Integer> callIds =
      Collections.synchronizedList(new ArrayList<Integer>());
  server.callListener = new Runnable() {
    @Override public void run() {
      callIds.add(Server.getCallId());
    }
  };
  Client client = new Client(LongWritable.class, conf);
  try {
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    SerialCaller[] callers = new SerialCaller[callerCount];
    for (int i = 0; i < callerCount; ++i) {
      callers[i] = new SerialCaller(client, addr, perCallerCallCount);
      callers[i].start();
    }
    for (int i = 0; i < callerCount; ++i) {
      callers[i].join();
      assertFalse(callers[i].failed);
    }
  } finally {
    client.stop();
    server.stop();
  }
  // Every call must have produced an id; after sorting, the ids must form a
  // strictly sequential run starting at the smallest one.
  int expectedCallCount = callerCount * perCallerCallCount;
  assertEquals(expectedCallCount, callIds.size());
  Collections.sort(callIds);
  final int startID = callIds.get(0).intValue();
  for (int i = 0; i < expectedCallCount; ++i) {
    assertEquals(startID + i, callIds.get(i).intValue());
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * A custom IdentityProvider named in the configuration must be instantiated
 * via Configuration.getInstances().
 */
@Test public void testPluggableIdentityProvider() {
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,
      "org.apache.hadoop.ipc.UserIdentityProvider");
  // Fixed: typed list instead of a raw type.
  List<IdentityProvider> providers = conf.getInstances(
      CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,
      IdentityProvider.class);
  // Fixed: assertEquals gives a useful failure message, unlike
  // assertTrue(size() == 1).
  assertEquals(1, providers.size());
  IdentityProvider ip = providers.get(0);
  assertNotNull(ip);
  // Fixed: expected value first, per the JUnit convention.
  assertEquals(UserIdentityProvider.class, ip.getClass());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Check that we can reach a NameNode or Resource Manager using a specific
 * socket factory
 */
@Test public void testSocketFactory() throws IOException {
// Bring up a 1-datanode HDFS cluster with the stock socket factory.
Configuration sconf=new Configuration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(sconf).numDataNodes(1).build();
final int nameNodePort=cluster.getNameNodePort();
FileSystem fs=cluster.getFileSystem();
Assert.assertTrue(fs instanceof DistributedFileSystem);
DistributedFileSystem directDfs=(DistributedFileSystem)fs;
// Second handle to the same namenode, but through the custom socket factory.
Configuration cconf=getCustomSocketConfigs(nameNodePort);
fs=FileSystem.get(cconf);
Assert.assertTrue(fs instanceof DistributedFileSystem);
DistributedFileSystem dfs=(DistributedFileSystem)fs;
JobClient client=null;
MiniMRYarnCluster miniMRYarnCluster=null;
try {
// A directory created through the direct handle must be visible through
// the custom-socket-factory handle as well.
Path filePath=new Path("/dir");
Assert.assertFalse(directDfs.exists(filePath));
Assert.assertFalse(dfs.exists(filePath));
directDfs.mkdirs(filePath);
Assert.assertTrue(directDfs.exists(filePath));
Assert.assertTrue(dfs.exists(filePath));
// Now check the Resource Manager side: start a mini MR/YARN cluster.
fs=FileSystem.get(sconf);
JobConf jobConf=new JobConf();
FileSystem.setDefaultUri(jobConf,fs.getUri().toString());
miniMRYarnCluster=initAndStartMiniMRYarnCluster(jobConf);
JobConf jconf=new JobConf(miniMRYarnCluster.getConfig());
jconf.set("hadoop.rpc.socket.factory.class.default","org.apache.hadoop.ipc.DummySocketFactory");
jconf.set(MRConfig.FRAMEWORK_NAME,MRConfig.YARN_FRAMEWORK_NAME);
// Point the client at an offset RM port; DummySocketFactory is expected
// to bridge the difference -- see its implementation for the offset.
String rmAddress=jconf.get("yarn.resourcemanager.address");
String[] split=rmAddress.split(":");
jconf.set("yarn.resourcemanager.address",split[0] + ':' + (Integer.parseInt(split[1]) + 10));
client=new JobClient(jconf);
JobStatus[] jobs=client.jobsToComplete();
Assert.assertTrue(jobs.length == 0);
}
finally {
// Teardown order matters: client, then filesystems, then clusters.
closeClient(client);
closeDfs(dfs);
closeDfs(directDfs);
stopMiniMRYarnCluster(miniMRYarnCluster);
shutdownDFSCluster(cluster);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void test1() throws IOException {
ProtocolProxy> proxy;
proxy=RPC.getProtocolProxy(Foo0.class,Foo0.versionID,addr,conf);
Foo0 foo0=(Foo0)proxy.getProxy();
Assert.assertEquals("Foo0",foo0.ping());
proxy=RPC.getProtocolProxy(Foo1.class,Foo1.versionID,addr,conf);
Foo1 foo1=(Foo1)proxy.getProxy();
Assert.assertEquals("Foo1",foo1.ping());
Assert.assertEquals("Foo1",foo1.ping());
proxy=RPC.getProtocolProxy(Bar.class,Foo1.versionID,addr,conf);
Bar bar=(Bar)proxy.getProxy();
Assert.assertEquals(99,bar.echo(99));
Mixin mixin=bar;
mixin.hello();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* getProtocolVersion of an unimplemented version should return highest version
* Similarly getProtocolSignature should work.
* @throws IOException
*/
@Test public void testNonExistingProtocol2() throws IOException {
ProtocolProxy> proxy;
proxy=RPC.getProtocolProxy(FooUnimplemented.class,FooUnimplemented.versionID,addr,conf);
FooUnimplemented foo=(FooUnimplemented)proxy.getProxy();
Assert.assertEquals(Foo1.versionID,foo.getProtocolVersion(RPC.getProtocolName(FooUnimplemented.class),FooUnimplemented.versionID));
foo.getProtocolSignature(RPC.getProtocolName(FooUnimplemented.class),FooUnimplemented.versionID,0);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// A short echo must succeed while an over-long one must be rejected.
@Test(timeout=6000) public void testExtraLongRpc() throws Exception {
TestRpcService2 client=getClient2();
// A 4-character message should echo back unchanged.
final String shortString=StringUtils.repeat("X",4);
EchoRequestProto echoRequest=EchoRequestProto.newBuilder().setMessage(shortString).build();
EchoResponseProto echoResponse=client.echo2(null,echoRequest);
Assert.assertEquals(shortString,echoResponse.getMessage());
// A 4096-character message is expected to exceed the configured limit.
final String longString=StringUtils.repeat("X",4096);
echoRequest=EchoRequestProto.newBuilder().setMessage(longString).build();
try {
echoResponse=client.echo2(null,echoRequest);
Assert.fail("expected extra-long RPC to fail");
}
catch ( ServiceException se) {
// expected: the server rejects the over-long request
}
}
APIUtilityVerifier EqualityVerifier
@Test public void testServerAddress() throws IOException {
  // Build a server (never started); we only need its bind address.
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(5).setVerbose(true).build();
  InetSocketAddress bindAddr = null;
  try {
    bindAddr = NetUtils.getConnectAddress(server);
  } finally {
    server.stop();
  }
  // The connect address must resolve to the local host.
  assertEquals(InetAddress.getLocalHost(), bindAddr.getAddress());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testStopProxy() throws IOException {
  StoppedProtocol proxy = RPC.getProxy(StoppedProtocol.class,
      StoppedProtocol.versionID, null, conf);
  StoppedInvocationHandler handler =
      (StoppedInvocationHandler) Proxy.getInvocationHandler(proxy);
  // stopProxy() must close the underlying handler exactly once.
  assertEquals(0, handler.getCloseCalled());
  RPC.stopProxy(proxy);
  assertEquals(1, handler.getCloseCalled());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// RPC servers must publish queue/processing time counters and, when enabled,
// per-interval quantile gauges.
@Test public void testRpcMetrics() throws Exception {
Configuration configuration=new Configuration();
// Enable quantile metrics with a 1-second rollover interval.
final int interval=1;
configuration.setBoolean(CommonConfigurationKeys.RPC_METRICS_QUANTILE_ENABLE,true);
configuration.set(CommonConfigurationKeys.RPC_METRICS_PERCENTILES_INTERVALS_KEY,"" + interval);
final Server server=new RPC.Builder(configuration).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).build();
server.start();
final TestProtocol proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,server.getListenerAddress(),configuration);
try {
// Generate enough traffic for the counters and quantiles to populate.
for (int i=0; i < 1000; i++) {
proxy.ping();
proxy.echo("" + i);
}
MetricsRecordBuilder rpcMetrics=getMetrics(server.getRpcMetrics().name());
assertTrue("Expected non-zero rpc queue time",getLongCounter("RpcQueueTimeNumOps",rpcMetrics) > 0);
assertTrue("Expected non-zero rpc processing time",getLongCounter("RpcProcessingTimeNumOps",rpcMetrics) > 0);
// Quantile gauges are named with the interval suffix, e.g. "RpcQueueTime1s".
MetricsAsserts.assertQuantileGauges("RpcQueueTime" + interval + "s",rpcMetrics);
MetricsAsserts.assertQuantileGauges("RpcProcessingTime" + interval + "s",rpcMetrics);
}
finally {
if (proxy != null) {
RPC.stopProxy(proxy);
}
server.stop();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testProxyAddress() throws IOException {
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).build();
  TestProtocol proxy = null;
  try {
    server.start();
    // The proxy must report the address it was created against.
    InetSocketAddress serverAddr = NetUtils.getConnectAddress(server);
    proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID,
        serverAddr, conf);
    assertEquals(serverAddr, RPC.getServerAddress(proxy));
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testSlowRpc() throws IOException {
  System.out.println("Testing Slow RPC");
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(2).setVerbose(false).build();
  TestProtocol proxy = null;
  try {
    server.start();
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf);

    // Kick off one slow call in the background.
    SlowRPC slowrpc = new SlowRPC(proxy);
    new Thread(slowrpc, "SlowRPC").start();

    // Two fast pings complete while the slow call is still outstanding
    // (there are 2 handlers, so the fast calls are not starved).
    assertFalse("Slow RPC should not have finished1.", slowrpc.isDone());
    proxy.slowPing(false);
    assertFalse("Slow RPC should not have finished2.", slowrpc.isDone());
    proxy.slowPing(false);

    // Then wait for the slow call itself to complete.
    while (!slowrpc.isDone()) {
      System.out.println("Waiting for slow RPC to get done.");
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        // keep polling; the loop condition governs termination
      }
    }
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    System.out.println("Down slow rpc testing");
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testWrappedStopProxy() throws IOException {
  StoppedProtocol underlying = RPC.getProxy(StoppedProtocol.class,
      StoppedProtocol.versionID, null, conf);
  StoppedInvocationHandler handler =
      (StoppedInvocationHandler) Proxy.getInvocationHandler(underlying);
  // Wrap the RPC proxy in a retry proxy; stopping the wrapper must close
  // the underlying handler exactly once.
  StoppedProtocol wrapped = (StoppedProtocol) RetryProxy.create(
      StoppedProtocol.class, underlying, RetryPolicies.RETRY_FOREVER);
  assertEquals(0, handler.getCloseCalled());
  RPC.stopProxy(wrapped);
  assertEquals(1, handler.getCloseCalled());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testConfRpc() throws IOException {
  // First server: queue size and reader count come from the configuration.
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(1).setVerbose(false).build();
  int expectedQueueSize = conf.getInt(
      CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_KEY,
      CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_DEFAULT);
  assertEquals(expectedQueueSize, server.getMaxQueueSize());
  int expectedReaders = conf.getInt(
      CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY,
      CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_DEFAULT);
  assertEquals(expectedReaders, server.getNumReaders());
  server.stop();

  // Second server: explicit builder values override the configuration.
  server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(1).setnumReaders(3).setQueueSizePerHandler(200)
      .setVerbose(false).build();
  assertEquals(3, server.getNumReaders());
  assertEquals(200, server.getMaxQueueSize());
  server.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify that ProtocolMetaInfoServerSideTranslatorPB correctly looks up
 * the server registry to extract protocol signatures and versions.
 */
@Test public void testProtocolMetaInfoSSTranslatorPB() throws Exception {
  TestImpl1 impl = new TestImpl1();
  server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
      .setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
      .setVerbose(false).build();
  server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
  server.start();

  ProtocolMetaInfoServerSideTranslatorPB xlator =
      new ProtocolMetaInfoServerSideTranslatorPB(server);
  // TestProtocol1 is not registered for the protobuf RPC kind ...
  GetProtocolSignatureResponseProto resp = xlator.getProtocolSignature(null,
      createGetProtocolSigRequestProto(TestProtocol1.class,
          RPC.RpcKind.RPC_PROTOCOL_BUFFER));
  Assert.assertEquals(0, resp.getProtocolSignatureCount());
  // ... but it is registered for the writable RPC kind.
  resp = xlator.getProtocolSignature(null,
      createGetProtocolSigRequestProto(TestProtocol1.class,
          RPC.RpcKind.RPC_WRITABLE));
  Assert.assertEquals(1, resp.getProtocolSignatureCount());
  ProtocolSignatureProto sig = resp.getProtocolSignatureList().get(0);
  Assert.assertEquals(TestProtocol1.versionID, sig.getVersion());

  // The signature must include the fingerprint of echo(String).
  int expected = ProtocolSignature.getFingerprint(
      TestProtocol1.class.getMethod("echo", String.class));
  boolean found = false;
  for (int fingerprint : sig.getMethodsList()) {
    if (fingerprint == expected) {
      found = true;
      break;
    }
  }
  Assert.assertTrue(found);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testHashCode() throws Exception {
  // Overloads with different parameter types must fingerprint differently.
  Method strEcho = TestProtocol3.class.getMethod("echo", String.class);
  Method intEcho = TestProtocol3.class.getMethod("echo", int.class);
  int strEchoHash = ProtocolSignature.getFingerprint(strEcho);
  int intEchoHash = ProtocolSignature.getFingerprint(intEcho);
  assertFalse(strEchoHash == intEchoHash);

  // echo(int) fingerprints identically across TestProtocol2/TestProtocol3 ...
  assertEquals(intEchoHash, ProtocolSignature.getFingerprint(
      TestProtocol2.class.getMethod("echo", int.class)));
  // ... while echo(String) fingerprints differently between them.
  assertFalse(strEchoHash == ProtocolSignature.getFingerprint(
      TestProtocol2.class.getMethod("echo", String.class)));

  // A different method name changes the fingerprint.
  assertFalse(intEchoHash == ProtocolSignature.getFingerprint(
      TestProtocol3.class.getMethod("echo_alias", int.class)));
  // A different arity changes the fingerprint.
  assertFalse(intEchoHash == ProtocolSignature.getFingerprint(
      TestProtocol3.class.getMethod("echo", int.class, int.class)));

  // The aggregate fingerprint must not depend on method order.
  int oneOrder = ProtocolSignature.getFingerprint(new Method[]{intEcho, strEcho});
  int otherOrder = ProtocolSignature.getFingerprint(new Method[]{strEcho, intEcho});
  assertEquals(oneOrder, otherOrder);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testVersion2ClientVersion1Server() throws Exception {
  // Start a server implementing only version 1 of the protocol.
  TestImpl1 impl = new TestImpl1();
  server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
      .setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
      .setVerbose(false).build();
  server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
  server.start();
  addr = NetUtils.getConnectAddress(server);

  // A version-2 client can still talk to it; echo(3) yields 3 here
  // (contrast with the version-2 server test, where it yields -3).
  Version2Client client = new Version2Client();
  client.ping();
  assertEquals("hello", client.echo("hello"));
  assertEquals(3, client.echo(3));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testIsMethodSupported() throws IOException {
  server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
      .setInstance(new TestImpl2()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(2).setVerbose(false).build();
  server.start();
  addr = NetUtils.getConnectAddress(server);
  TestProtocol2 proxy = RPC.getProxy(TestProtocol2.class,
      TestProtocol2.versionID, addr, conf);
  // "echo" is available under the WRITABLE rpc kind ...
  Assert.assertTrue(RpcClientUtil.isMethodSupported(proxy, TestProtocol2.class,
      RPC.RpcKind.RPC_WRITABLE, RPC.getProtocolVersion(TestProtocol2.class),
      "echo"));
  // ... but not under the PROTOCOL_BUFFER rpc kind.
  Assert.assertFalse(RpcClientUtil.isMethodSupported(proxy, TestProtocol2.class,
      RPC.RpcKind.RPC_PROTOCOL_BUFFER,
      RPC.getProtocolVersion(TestProtocol2.class), "echo"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testVersion2ClientVersion2Server() throws Exception {
  // Server and client both speak version 2 of the protocol.
  TestImpl2 impl = new TestImpl2();
  server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
      .setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
      .setVerbose(false).build();
  server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
  server.start();
  addr = NetUtils.getConnectAddress(server);

  Version2Client client = new Version2Client();
  client.ping();
  assertEquals("hello", client.echo("hello"));
  // Against the version-2 implementation, echo(3) yields -3.
  assertEquals(-3, client.echo(3));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * The connection's ping interval is the configured value when client pings
 * are enabled, and 0 when they are disabled.
 */
@Test public void testPingInterval() throws Exception {
  Configuration newConf = new Configuration(conf);
  newConf.set(SERVER_PRINCIPAL_KEY, SERVER_PRINCIPAL_1);
  // Fixed: set the interval on newConf, the configuration actually passed
  // to getConnectionId below. The original mutated the shared 'conf' field,
  // which newConf (copied above) never sees -- an ineffective write that
  // also leaked state into other tests.
  newConf.setInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY,
      CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT);

  // Pings enabled: the configured interval is used.
  newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
  ConnectionId remoteId = ConnectionId.getConnectionId(
      new InetSocketAddress(0), TestSaslProtocol.class, null, 0, newConf);
  assertEquals(CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT,
      remoteId.getPingInterval());

  // Pings disabled: the interval is reported as 0.
  newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, false);
  remoteId = ConnectionId.getConnectionId(
      new InetSocketAddress(0), TestSaslProtocol.class, null, 0, newConf);
  assertEquals(0, remoteId.getPingInterval());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
// Connections created under configurations that differ in per-connection
// settings (max idle time) must not share a cached connection.
@Test public void testPerConnectionConf() throws Exception {
TestTokenSecretManager sm=new TestTokenSecretManager();
final Server server=new RPC.Builder(conf).setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
server.start();
// Give the current user a token for the test server so SASL auth succeeds.
final UserGroupInformation current=UserGroupInformation.getCurrentUser();
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
TestTokenIdentifier tokenId=new TestTokenIdentifier(new Text(current.getUserName()));
Token token=new Token(tokenId,sm);
SecurityUtil.setTokenService(token,addr);
current.addToken(token);
Configuration newConf=new Configuration(conf);
newConf.set(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,"");
Client client=null;
TestSaslProtocol proxy1=null;
TestSaslProtocol proxy2=null;
TestSaslProtocol proxy3=null;
// Two distinct max-idle-time values used to force distinct ConnectionIds.
int timeouts[]={111222,3333333};
try {
newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,timeouts[0]);
proxy1=RPC.getProxy(TestSaslProtocol.class,TestSaslProtocol.versionID,addr,newConf);
proxy1.getAuthMethod();
client=WritableRpcEngine.getClient(newConf);
Set conns=client.getConnectionIds();
assertEquals("number of connections in cache is wrong",1,conns.size());
// Same configuration values: proxy2 must reuse proxy1's connection.
proxy2=RPC.getProxy(TestSaslProtocol.class,TestSaslProtocol.versionID,addr,newConf);
proxy2.getAuthMethod();
assertEquals("number of connections in cache is wrong",1,conns.size());
// Different max idle time: proxy3 must get a new connection.
newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,timeouts[1]);
proxy3=RPC.getProxy(TestSaslProtocol.class,TestSaslProtocol.versionID,addr,newConf);
proxy3.getAuthMethod();
assertEquals("number of connections in cache is wrong",2,conns.size());
ConnectionId[] connsArray={RPC.getConnectionIdForProxy(proxy1),RPC.getConnectionIdForProxy(proxy2),RPC.getConnectionIdForProxy(proxy3)};
assertEquals(connsArray[0],connsArray[1]);
assertEquals(connsArray[0].getMaxIdleTime(),timeouts[0]);
assertFalse(connsArray[0].equals(connsArray[2]));
// NOTE(review): assertNotSame compares boxed Integer identity, not value;
// for ints this large the boxes are always distinct objects, so this
// assertion can never fail regardless of the actual idle time.
// assertEquals(timeouts[1], connsArray[2].getMaxIdleTime()) looks like the
// intent -- confirm before changing.
assertNotSame(connsArray[2].getMaxIdleTime(),timeouts[1]);
}
finally {
server.stop();
if (client != null) {
client.getConnectionIds().clear();
}
if (proxy1 != null) RPC.stopProxy(proxy1);
if (proxy2 != null) RPC.stopProxy(proxy2);
if (proxy3 != null) RPC.stopProxy(proxy3);
}
}
APIUtilityVerifier BooleanVerifier
@Test public void testBind() throws Exception {
  Configuration conf = new Configuration();
  // Hold an ephemeral port open so that it is unavailable to Server.bind.
  ServerSocket anchor = new ServerSocket();
  anchor.bind(new InetSocketAddress("0.0.0.0", 0));
  try {
    int min = anchor.getLocalPort();
    int max = min + 100;
    conf.set("TestRange", min + "-" + max);
    // Server.bind must pick a port from the configured range; 'min' itself
    // is taken by 'anchor', so the chosen port lies strictly above it.
    ServerSocket bound = new ServerSocket();
    Server.bind(bound, new InetSocketAddress("0.0.0.0", 0), 10, conf,
        "TestRange");
    try {
      assertTrue(bound.isBound());
      assertTrue(bound.getLocalPort() > min);
      assertTrue(bound.getLocalPort() <= max);
    } finally {
      bound.close();
    }
  } finally {
    anchor.close();
  }
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Socket factories must implement equals/hashCode well enough to serve as
 * distinct HashMap keys.
 */
@Test public void testSocketFactoryAsKeyInMap() {
  // Fixed: typed map instead of raw types.
  Map<SocketFactory, Integer> cache = new HashMap<SocketFactory, Integer>();
  int toBeCached1 = 1;
  int toBeCached2 = 2;
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      "org.apache.hadoop.ipc.TestSocketFactory$DummySocketFactory");
  final SocketFactory dummySocketFactory = NetUtils.getDefaultSocketFactory(conf);
  cache.put(dummySocketFactory, toBeCached1);
  conf.set(CommonConfigurationKeys.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      "org.apache.hadoop.net.StandardSocketFactory");
  final SocketFactory defaultSocketFactory = NetUtils.getDefaultSocketFactory(conf);
  cache.put(defaultSocketFactory, toBeCached2);
  Assert.assertEquals("The cache contains two elements", 2, cache.size());
  // Fixed: assertFalse instead of assertEquals(..., result, false).
  Assert.assertFalse("Equals of both socket factory shouldn't be same",
      defaultSocketFactory.equals(dummySocketFactory));
  // Fixed: compare removed values by equality. The original used assertSame
  // on autoboxed Integers, which only held by accident of the small-value
  // Integer cache and tests identity rather than the intended value.
  Assert.assertEquals(Integer.valueOf(toBeCached2),
      cache.remove(defaultSocketFactory));
  cache.put(defaultSocketFactory, toBeCached2);
  Assert.assertEquals(Integer.valueOf(toBeCached1),
      cache.remove(dummySocketFactory));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test @TestDir public void lifeCycle() throws Exception {
  Configuration serverConf = new Configuration(false);
  serverConf.set("server.services", LifeCycleService.class.getName());
  Server server = createServer(serverConf);
  // Before init the server status is UNDEF.
  assertEquals(server.getStatus(), Server.Status.UNDEF);
  // init() brings the configured service up and moves the server to NORMAL.
  server.init();
  assertNotNull(server.get(LifeCycleService.class));
  assertEquals(server.getStatus(), Server.Status.NORMAL);
  // destroy() moves the server to SHUTDOWN.
  server.destroy();
  assertEquals(server.getStatus(), Server.Status.SHUTDOWN);
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test @TestDir public void serviceLifeCycle() throws Exception {
  // Record the lifecycle callbacks TestService receives.
  TestService.LIFECYCLE.clear();
  Configuration serverConf = new Configuration(false);
  serverConf.set("server.services", TestService.class.getName());
  Server server = createServer(serverConf);
  server.init();
  assertNotNull(server.get(TestService.class));
  server.destroy();
  // The callbacks must have arrived in exactly this order.
  assertEquals(TestService.LIFECYCLE,
      Arrays.asList("init", "postInit", "serverStatusChange", "destroy"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test @TestDir public void startWithStatusNotNormal() throws Exception {
  Configuration serverConf = new Configuration(false);
  // Ask the server to come up in ADMIN mode rather than NORMAL.
  serverConf.set("server.startup.status", "ADMIN");
  Server server = createServer(serverConf);
  server.init();
  assertEquals(server.getStatus(), Server.Status.ADMIN);
  server.destroy();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// The sampler's rate is the mean of a sliding window of the last 4 samples
// (window size set by init), and its JSON form exposes rate plus size.
@Test public void sampler() throws Exception {
// Backing variable the sampler polls; value[0] is mutated between samples.
final long value[]=new long[1];
Instrumentation.Variable var=new Instrumentation.Variable(){
@Override public Long getValue(){
return value[0];
}
}
;
InstrumentationService.Sampler sampler=new InstrumentationService.Sampler();
sampler.init(4,var);
assertEquals(sampler.getRate(),0f,0.0001);
sampler.sample();
assertEquals(sampler.getRate(),0f,0.0001);
value[0]=1;
sampler.sample();
assertEquals(sampler.getRate(),(0d + 1) / 2,0.0001);
value[0]=2;
sampler.sample();
assertEquals(sampler.getRate(),(0d + 1 + 2) / 3,0.0001);
value[0]=3;
sampler.sample();
assertEquals(sampler.getRate(),(0d + 1 + 2+ 3) / 4,0.0001);
// Fifth sample evicts the oldest value (0) from the size-4 window.
value[0]=4;
sampler.sample();
assertEquals(sampler.getRate(),(4d + 1 + 2+ 3) / 4,0.0001);
// JSON produced via toJSONString and via writeJSONString must agree and
// carry exactly the rate ("sampler") and window size ("size").
JSONObject json=(JSONObject)new JSONParser().parse(sampler.toJSONString());
assertEquals(json.size(),2);
assertEquals(json.get("sampler"),sampler.getRate());
assertEquals(json.get("size"),4L);
StringWriter writer=new StringWriter();
sampler.writeJSONString(writer);
writer.close();
json=(JSONObject)new JSONParser().parse(writer.toString());
assertEquals(json.size(),2);
assertEquals(json.get("sampler"),sampler.getRate());
assertEquals(json.get("size"),4L);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Exercises InstrumentationService.Timer with three crons, checking the
// last/average "own" (actively running) and "total" (wall clock) times.
@Test public void timer() throws Exception {
// The timer averages over a window of the last 2 crons (ctor arg).
InstrumentationService.Timer timer=new InstrumentationService.Timer(2);
InstrumentationService.Cron cron=new InstrumentationService.Cron();
long ownStart;
long ownEnd;
long totalStart;
long totalEnd;
long ownDelta;    // time the cron was running (sum of start..stop spans)
long totalDelta;  // wall clock from first start to last stop
long avgTotal;
long avgOwn;
// --- Cron 1: two ~100ms active segments separated by a ~100ms pause.
cron.start();
ownStart=Time.now();
totalStart=ownStart;
ownDelta=0;
sleep(100);
cron.stop();
ownEnd=Time.now();
ownDelta+=ownEnd - ownStart;
sleep(100);
cron.start();
ownStart=Time.now();
sleep(100);
cron.stop();
ownEnd=Time.now();
ownDelta+=ownEnd - ownStart;
totalEnd=ownEnd;
totalDelta=totalEnd - totalStart;
avgTotal=totalDelta;
avgOwn=ownDelta;
timer.addCron(cron);
// With a single cron the averages equal that cron's own values.
// All timing comparisons use a 20ms tolerance.
long[] values=timer.getValues();
assertEquals(values[InstrumentationService.Timer.LAST_TOTAL],totalDelta,20);
assertEquals(values[InstrumentationService.Timer.LAST_OWN],ownDelta,20);
assertEquals(values[InstrumentationService.Timer.AVG_TOTAL],avgTotal,20);
assertEquals(values[InstrumentationService.Timer.AVG_OWN],avgOwn,20);
// --- Cron 2: same shape with ~200ms segments; averages now span 2 crons.
cron=new InstrumentationService.Cron();
cron.start();
ownStart=Time.now();
totalStart=ownStart;
ownDelta=0;
sleep(200);
cron.stop();
ownEnd=Time.now();
ownDelta+=ownEnd - ownStart;
sleep(200);
cron.start();
ownStart=Time.now();
sleep(200);
cron.stop();
ownEnd=Time.now();
ownDelta+=ownEnd - ownStart;
totalEnd=ownEnd;
totalDelta=totalEnd - totalStart;
avgTotal=(avgTotal * 1 + totalDelta) / 2;
avgOwn=(avgOwn * 1 + ownDelta) / 2;
timer.addCron(cron);
values=timer.getValues();
assertEquals(values[InstrumentationService.Timer.LAST_TOTAL],totalDelta,20);
assertEquals(values[InstrumentationService.Timer.LAST_OWN],ownDelta,20);
assertEquals(values[InstrumentationService.Timer.AVG_TOTAL],avgTotal,20);
assertEquals(values[InstrumentationService.Timer.AVG_OWN],avgOwn,20);
// --- Cron 3: ~300ms segments. The size-2 window drops cron 1, so the
// expected averages are rebased on cron 2 before folding in cron 3.
avgTotal=totalDelta;
avgOwn=ownDelta;
cron=new InstrumentationService.Cron();
cron.start();
ownStart=Time.now();
totalStart=ownStart;
ownDelta=0;
sleep(300);
cron.stop();
ownEnd=Time.now();
ownDelta+=ownEnd - ownStart;
sleep(300);
cron.start();
ownStart=Time.now();
sleep(300);
cron.stop();
ownEnd=Time.now();
ownDelta+=ownEnd - ownStart;
totalEnd=ownEnd;
totalDelta=totalEnd - totalStart;
avgTotal=(avgTotal * 1 + totalDelta) / 2;
avgOwn=(avgOwn * 1 + ownDelta) / 2;
// NOTE(review): this second stop() on an already-stopped cron looks
// redundant -- confirm Cron.stop() is idempotent.
cron.stop();
timer.addCron(cron);
values=timer.getValues();
assertEquals(values[InstrumentationService.Timer.LAST_TOTAL],totalDelta,20);
assertEquals(values[InstrumentationService.Timer.LAST_OWN],ownDelta,20);
assertEquals(values[InstrumentationService.Timer.AVG_TOTAL],avgTotal,20);
assertEquals(values[InstrumentationService.Timer.AVG_OWN],avgOwn,20);
// The JSON form (both toJSONString and writeJSONString) must expose
// exactly the four values under their camelCase keys.
JSONObject json=(JSONObject)new JSONParser().parse(timer.toJSONString());
assertEquals(json.size(),4);
assertEquals(json.get("lastTotal"),values[InstrumentationService.Timer.LAST_TOTAL]);
assertEquals(json.get("lastOwn"),values[InstrumentationService.Timer.LAST_OWN]);
assertEquals(json.get("avgTotal"),values[InstrumentationService.Timer.AVG_TOTAL]);
assertEquals(json.get("avgOwn"),values[InstrumentationService.Timer.AVG_OWN]);
StringWriter writer=new StringWriter();
timer.writeJSONString(writer);
writer.close();
json=(JSONObject)new JSONParser().parse(writer.toString());
assertEquals(json.size(),4);
assertEquals(json.get("lastTotal"),values[InstrumentationService.Timer.LAST_TOTAL]);
assertEquals(json.get("lastOwn"),values[InstrumentationService.Timer.LAST_OWN]);
assertEquals(json.get("avgTotal"),values[InstrumentationService.Timer.AVG_TOTAL]);
assertEquals(json.get("avgOwn"),values[InstrumentationService.Timer.AVG_OWN]);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that a VariableHolder serializes its wrapped Variable to the
 * single-entry JSON form {"value": <variable value>}, both via
 * toJSONString() and via writeJSONString(Writer).
 */
@Test
public void variableHolder() throws Exception {
  InstrumentationService.VariableHolder holder = new InstrumentationService.VariableHolder();
  // Back the holder with a constant-valued variable.
  holder.var = new Instrumentation.Variable() {
    @Override
    public String getValue() {
      return "foo";
    }
  };

  // Serialization via toJSONString().
  JSONObject parsed = (JSONObject) new JSONParser().parse(holder.toJSONString());
  assertEquals(parsed.size(), 1);
  assertEquals(parsed.get("value"), "foo");

  // Serialization via writeJSONString(Writer) must yield the same document.
  StringWriter sink = new StringWriter();
  holder.writeJSONString(sink);
  sink.close();
  parsed = (JSONObject) new JSONParser().parse(sink.toString());
  assertEquals(parsed.size(), 1);
  assertEquals(parsed.get("value"), "foo");
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test @TestDir @SuppressWarnings("unchecked") public void sampling() throws Exception {
String dir=TestDirHelper.getTestDir().getAbsolutePath();
String services=StringUtils.join(",",Arrays.asList(InstrumentationService.class.getName(),SchedulerService.class.getName()));
Configuration conf=new Configuration(false);
conf.set("server.services",services);
Server server=new Server("server",dir,dir,dir,dir,conf);
server.init();
Instrumentation instrumentation=server.get(Instrumentation.class);
final AtomicInteger count=new AtomicInteger();
Instrumentation.Variable varToSample=new Instrumentation.Variable(){
@Override public Long getValue(){
return (long)count.incrementAndGet();
}
}
;
instrumentation.addSampler("g","s",10,varToSample);
sleep(2000);
int i=count.get();
assertTrue(i > 0);
Map> snapshot=instrumentation.getSnapshot();
Map> samplers=(Map>)snapshot.get("samplers");
InstrumentationService.Sampler sampler=(InstrumentationService.Sampler)samplers.get("g").get("s");
assertTrue(sampler.getRate() > 0);
server.destroy();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Checks ConfigurationUtils.resolve(): variable references such as "${a}"
 * in raw values are expanded, while plain values are left untouched.
 */
@Test
public void resolve() {
  Configuration props = new Configuration(false);
  props.set("a", "A");
  props.set("b", "${a}");

  // Before resolution, "b" still holds the unexpanded reference.
  assertEquals(props.getRaw("a"), "A");
  assertEquals(props.getRaw("b"), "${a}");

  // After resolution, "${a}" has been replaced by the value of "a".
  props = ConfigurationUtils.resolve(props);
  assertEquals(props.getRaw("a"), "A");
  assertEquals(props.getRaw("b"), "A");
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Serializes a log event carrying a nested exception (an IOException
 * wrapping a NoRouteToHostException) to JSON via Log4Json, then checks the
 * resulting document: level, logger name, timestamp, exception class, a
 * stack-trace array, and a textual date containing "-" and ":" separators.
 */
@Test public void testNestedException() throws Throwable {
  Exception cause = new NoRouteToHostException("that box caught fire 3 years ago");
  Exception wrapper = new IOException("Datacenter problems", cause);
  ThrowableInformation throwableInfo = new ThrowableInformation(wrapper);
  Log4Json log4Json = new Log4Json();
  long eventTime = Time.now();
  // The quoted and brace-bearing message arguments exercise JSON escaping.
  String outcome = log4Json.toJson(new StringWriter(), "testNestedException",
      eventTime, "INFO", "quoted\"", "new line\n and {}", throwableInfo).toString();
  println("testNestedException", outcome);

  ContainerNode rootNode = Log4Json.parse(outcome);
  assertEntryEquals(rootNode, Log4Json.LEVEL, "INFO");
  assertEntryEquals(rootNode, Log4Json.NAME, "testNestedException");
  assertEntryEquals(rootNode, Log4Json.TIME, eventTime);
  assertEntryEquals(rootNode, Log4Json.EXCEPTION_CLASS, wrapper.getClass().getName());

  JsonNode node = assertNodeContains(rootNode, Log4Json.STACK);
  assertTrue("Not an array: " + node, node.isArray());

  node = assertNodeContains(rootNode, Log4Json.DATE);
  assertTrue("Not a string: " + node, node.isTextual());
  String dateText = node.getTextValue();
  assertTrue("No '-' in " + dateText, dateText.contains("-"));
  // FIX: this assertion checks for ':' but its failure message said "No '-'";
  // corrected the message to match the condition.
  assertTrue("No ':' in " + dateText, dateText.contains(":"));
}
APIUtilityVerifier BooleanVerifier
/**
 * Exercises JobClient.displayJobList() with a fully mocked JobStatus:
 * the printed listing must contain "Total jobs:1", every JobStatus getter
 * used by the listing must be consulted, and the Cluster/Job must never be
 * contacted (the listing is built from the JobStatus array alone).
 */
@Test public void testShowJob() throws Exception {
TestJobClient client=new TestJobClient(new JobConf());
// Stub a single RUNNING job with fixed identity and resource numbers.
long startTime=System.currentTimeMillis();
JobID jobID=new JobID(String.valueOf(startTime),12345);
JobStatus mockJobStatus=mock(JobStatus.class);
when(mockJobStatus.getJobID()).thenReturn(jobID);
when(mockJobStatus.getState()).thenReturn(JobStatus.State.RUNNING);
when(mockJobStatus.getStartTime()).thenReturn(startTime);
when(mockJobStatus.getUsername()).thenReturn("mockuser");
when(mockJobStatus.getQueue()).thenReturn("mockqueue");
when(mockJobStatus.getPriority()).thenReturn(JobPriority.NORMAL);
when(mockJobStatus.getNumUsedSlots()).thenReturn(1);
when(mockJobStatus.getNumReservedSlots()).thenReturn(1);
when(mockJobStatus.getUsedMem()).thenReturn(1024);
when(mockJobStatus.getReservedMem()).thenReturn(512);
when(mockJobStatus.getNeededMem()).thenReturn(2048);
when(mockJobStatus.getSchedulingInfo()).thenReturn("NA");
// Cluster-side mocks exist only to prove below that they are never used.
Job mockJob=mock(Job.class);
when(mockJob.getTaskReports(isA(TaskType.class))).thenReturn(new TaskReport[5]);
Cluster mockCluster=mock(Cluster.class);
when(mockCluster.getJob(jobID)).thenReturn(mockJob);
client.setCluster(mockCluster);
// Capture the listing output in memory for inspection.
ByteArrayOutputStream out=new ByteArrayOutputStream();
client.displayJobList(new JobStatus[]{mockJobStatus},new PrintWriter(out));
String commandLineOutput=out.toString();
System.out.println(commandLineOutput);
Assert.assertTrue(commandLineOutput.contains("Total jobs:1"));
// Each field of the status must have been read (the job id possibly
// more than once).
verify(mockJobStatus,atLeastOnce()).getJobID();
verify(mockJobStatus).getState();
verify(mockJobStatus).getStartTime();
verify(mockJobStatus).getUsername();
verify(mockJobStatus).getQueue();
verify(mockJobStatus).getPriority();
verify(mockJobStatus).getNumUsedSlots();
verify(mockJobStatus).getNumReservedSlots();
verify(mockJobStatus).getUsedMem();
verify(mockJobStatus).getReservedMem();
verify(mockJobStatus).getNeededMem();
verify(mockJobStatus).getSchedulingInfo();
// displayJobList() must not touch the cluster at all.
verify(mockCluster,never()).getJob(jobID);
verify(mockJob,never()).getTaskReports(isA(TaskType.class));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * When the RM no longer knows the application (getApplicationReport returns
 * null), getJobStatus() must fall back to the history server and surface the
 * archived report's job file, tracking URL and 100% progress values.
 */
@Test
public void testJobReportFromHistoryServer() throws Exception {
  MRClientProtocol historyProxy = mock(MRClientProtocol.class);
  when(historyProxy.getJobReport(getJobReportRequest()))
      .thenReturn(getJobReportResponseFromHistoryServer());
  ResourceMgrDelegate rmDelegate = mock(ResourceMgrDelegate.class);
  // RM has forgotten the app, forcing the history-server path.
  when(rmDelegate.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId()))
      .thenReturn(null);

  ClientServiceDelegate delegate = getClientServiceDelegate(historyProxy, rmDelegate);
  JobStatus status = delegate.getJobStatus(oldJobId);

  Assert.assertNotNull(status);
  Assert.assertEquals("TestJobFilePath", status.getJobFile());
  Assert.assertEquals("http://TestTrackingUrl", status.getTrackingUrl());
  Assert.assertEquals(1.0f, status.getMapProgress(), 0.0f);
  Assert.assertEquals(1.0f, status.getReduceProgress(), 0.0f);
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * When the AM is not reachable from the client (isAMReachableFromClient ==
 * false), getJobStatus() must never instantiate an AM proxy: while the app
 * is RUNNING the client returns a placeholder status ("N/A" job name), and
 * once the app finishes the full report is served by the history server.
 */
@Test public void testAMAccessDisabled() throws IOException {
// Only meaningful for the AM-unreachable configuration of this test class.
if (isAMReachableFromClient) {
return;
}
MRClientProtocol historyServerProxy=mock(MRClientProtocol.class);
when(historyServerProxy.getJobReport(getJobReportRequest())).thenReturn(getJobReportResponseFromHistoryServer());
ResourceMgrDelegate rmDelegate=mock(ResourceMgrDelegate.class);
try {
// Three RUNNING reports followed by a finished one, in call order.
when(rmDelegate.getApplicationReport(jobId.getAppId())).thenReturn(getRunningApplicationReport("am1",78)).thenReturn(getRunningApplicationReport("am1",78)).thenReturn(getRunningApplicationReport("am1",78)).thenReturn(getFinishedApplicationReport());
}
catch ( YarnException e) {
throw new IOException(e);
}
ClientServiceDelegate clientServiceDelegate=spy(getClientServiceDelegate(historyServerProxy,rmDelegate));
JobStatus jobStatus=clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
// While RUNNING, only placeholder data is available and no AM proxy is made.
Assert.assertEquals("N/A",jobStatus.getJobName());
verify(clientServiceDelegate,times(0)).instantiateAMProxy(any(InetSocketAddress.class));
jobStatus=clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
Assert.assertEquals("N/A",jobStatus.getJobName());
verify(clientServiceDelegate,times(0)).instantiateAMProxy(any(InetSocketAddress.class));
jobStatus=clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
Assert.assertEquals("N/A",jobStatus.getJobName());
verify(clientServiceDelegate,times(0)).instantiateAMProxy(any(InetSocketAddress.class));
// Fourth call: the app has finished, so the history server supplies the
// full report -- still without ever contacting the AM.
JobStatus jobStatus1=clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus1);
Assert.assertEquals("TestJobFilePath",jobStatus1.getJobFile());
Assert.assertEquals("http://TestTrackingUrl",jobStatus1.getTrackingUrl());
Assert.assertEquals(1.0f,jobStatus1.getMapProgress(),0.0f);
Assert.assertEquals(1.0f,jobStatus1.getReduceProgress(),0.0f);
verify(clientServiceDelegate,times(0)).instantiateAMProxy(any(InetSocketAddress.class));
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * A job unknown to the RM but present on the history server must still
 * yield a non-null JobStatus from getJobStatus().
 */
@Test
public void testUnknownAppInRM() throws Exception {
  MRClientProtocol historyProxy = mock(MRClientProtocol.class);
  when(historyProxy.getJobReport(getJobReportRequest())).thenReturn(getJobReportResponse());
  ClientServiceDelegate delegate = getClientServiceDelegate(historyProxy, getRMDelegate());
  JobStatus status = delegate.getJobStatus(oldJobId);
  Assert.assertNotNull(status);
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Counters for a job the RM has forgotten are fetched from the history
 * server; the archived dummy counter must round-trip with value 1001.
 */
@Test
public void testCountersFromHistoryServer() throws Exception {
  MRClientProtocol historyProxy = mock(MRClientProtocol.class);
  when(historyProxy.getCounters(getCountersRequest()))
      .thenReturn(getCountersResponseFromHistoryServer());
  ResourceMgrDelegate rmDelegate = mock(ResourceMgrDelegate.class);
  // Null report: the RM no longer tracks this application.
  when(rmDelegate.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId()))
      .thenReturn(null);

  ClientServiceDelegate delegate = getClientServiceDelegate(historyProxy, rmDelegate);
  Counters counters = TypeConverter.toYarn(delegate.getJobCounters(oldJobId));

  Assert.assertNotNull(counters);
  Assert.assertEquals(1001,
      counters.getCounterGroup("dummyCounters").getCounter("dummyCounter").getValue());
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * getJobStatus() must retry after transient history-server failures: two
 * consecutive RuntimeExceptions followed by a good report means the call
 * succeeds and the proxy sees exactly three getJobReport() attempts.
 */
@Test
public void testRetriesOnConnectionFailure() throws Exception {
  MRClientProtocol historyProxy = mock(MRClientProtocol.class);
  when(historyProxy.getJobReport(getJobReportRequest()))
      .thenThrow(new RuntimeException("1"))
      .thenThrow(new RuntimeException("2"))
      .thenReturn(getJobReportResponse());
  ResourceMgrDelegate rmDelegate = mock(ResourceMgrDelegate.class);
  when(rmDelegate.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId()))
      .thenReturn(null);

  ClientServiceDelegate delegate = getClientServiceDelegate(historyProxy, rmDelegate);
  Assert.assertNotNull(delegate.getJobStatus(oldJobId));
  verify(historyProxy, times(3)).getJobReport(any(GetJobReportRequest.class));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * The client must transparently reconnect when the AM restarts: the first
 * AM serves one report and then dies; after the RM reports no AM host
 * twice (the restart window), a second-generation AM takes over. Exactly
 * two AM proxies are created and later calls stick with the second one.
 */
@Test public void testReconnectOnAMRestart() throws IOException {
// Only meaningful when the client is allowed to talk to the AM directly.
if (!isAMReachableFromClient) {
return;
}
MRClientProtocol historyServerProxy=mock(MRClientProtocol.class);
ResourceMgrDelegate rmDelegate=mock(ResourceMgrDelegate.class);
try {
// am1 running -> no AM (twice, restart window) -> am2 running.
when(rmDelegate.getApplicationReport(jobId.getAppId())).thenReturn(getRunningApplicationReport("am1",78)).thenReturn(getRunningApplicationReport(null,0)).thenReturn(getRunningApplicationReport(null,0)).thenReturn(getRunningApplicationReport("am2",90));
}
catch ( YarnException e) {
throw new IOException(e);
}
// First-generation AM: answers once, then throws (simulating its death).
GetJobReportResponse jobReportResponse1=mock(GetJobReportResponse.class);
when(jobReportResponse1.getJobReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"jobName-firstGen","user",JobState.RUNNING,0,0,0,0,0,0,0,"anything",null,false,""));
MRClientProtocol firstGenAMProxy=mock(MRClientProtocol.class);
when(firstGenAMProxy.getJobReport(any(GetJobReportRequest.class))).thenReturn(jobReportResponse1).thenThrow(new RuntimeException("AM is down!"));
// Second-generation AM: always answers.
GetJobReportResponse jobReportResponse2=mock(GetJobReportResponse.class);
when(jobReportResponse2.getJobReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"jobName-secondGen","user",JobState.RUNNING,0,0,0,0,0,0,0,"anything",null,false,""));
MRClientProtocol secondGenAMProxy=mock(MRClientProtocol.class);
when(secondGenAMProxy.getJobReport(any(GetJobReportRequest.class))).thenReturn(jobReportResponse2);
ClientServiceDelegate clientServiceDelegate=spy(getClientServiceDelegate(historyServerProxy,rmDelegate));
// Hand out the two generations of AM proxies in order.
doReturn(firstGenAMProxy).doReturn(secondGenAMProxy).when(clientServiceDelegate).instantiateAMProxy(any(InetSocketAddress.class));
JobStatus jobStatus=clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
Assert.assertEquals("jobName-firstGen",jobStatus.getJobName());
// Second call hits the dead first-gen AM, then fails over to the second.
jobStatus=clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
Assert.assertEquals("jobName-secondGen",jobStatus.getJobName());
// Third call reuses the cached second-generation proxy.
jobStatus=clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
Assert.assertEquals("jobName-secondGen",jobStatus.getJobName());
verify(clientServiceDelegate,times(2)).instantiateAMProxy(any(InetSocketAddress.class));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * With no history server configured, getJobStatus() falls back to RM data
 * alone: an unknown app yields the placeholder "N/A"/PREP status, while a
 * finished application report maps to its user and SUCCEEDED state.
 */
@Test
public void testHistoryServerNotConfigured() throws Exception {
  // No history server, and the RM does not know the app: placeholder status.
  ClientServiceDelegate delegate = getClientServiceDelegate(null, getRMDelegate());
  JobStatus status = delegate.getJobStatus(oldJobId);
  Assert.assertEquals("N/A", status.getUsername());
  Assert.assertEquals(JobStatus.State.PREP, status.getState());

  // No history server, but the RM reports a finished application.
  ResourceMgrDelegate rmDelegate = mock(ResourceMgrDelegate.class);
  ApplicationReport report = getFinishedApplicationReport();
  when(rmDelegate.getApplicationReport(jobId.getAppId())).thenReturn(report);
  delegate = getClientServiceDelegate(null, rmDelegate);
  status = delegate.getJobStatus(oldJobId);
  Assert.assertEquals(report.getUser(), status.getUsername());
  Assert.assertEquals(JobStatus.State.SUCCEEDED, status.getState());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips random keyed records through CombineSequenceFileInputFormat:
 * regardless of the requested split count, all files combine into a single
 * CombineFileSplit, and reading it back yields every key exactly once.
 */
@Test(timeout=10000) public void testFormat() throws Exception {
JobConf job=new JobConf(conf);
Reporter reporter=Reporter.NULL;
// Random but reproducible: the seed is logged so failures can be replayed.
Random random=new Random();
long seed=random.nextLong();
LOG.info("seed = " + seed);
random.setSeed(seed);
localFs.delete(workDir,true);
FileInputFormat.setInputPaths(job,workDir);
final int length=10000;
final int numFiles=10;
createFiles(length,numFiles,random);
InputFormat format=new CombineSequenceFileInputFormat();
IntWritable key=new IntWritable();
BytesWritable value=new BytesWritable();
for (int i=0; i < 3; i++) {
// Vary the requested split count; the combine format must still return one.
int numSplits=random.nextInt(length / (SequenceFile.SYNC_INTERVAL / 20)) + 1;
LOG.info("splitting: requesting = " + numSplits);
InputSplit[] splits=format.getSplits(job,numSplits);
LOG.info("splitting: got = " + splits.length);
assertEquals("We got more than one splits!",1,splits.length);
InputSplit split=splits[0];
assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass());
// Track which keys have been seen; each must appear exactly once.
BitSet bits=new BitSet(length);
RecordReader reader=format.getRecordReader(split,job,reporter);
try {
while (reader.next(key,value)) {
assertFalse("Key in multiple partitions.",bits.get(key.get()));
bits.set(key.get());
}
}
finally {
reader.close();
}
assertEquals("Some keys in no partition.",length,bits.cardinality());
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips numbered text lines through CombineTextInputFormat: all
 * files combine into one CombineFileSplit, and reading it back yields every
 * value exactly once (the seed is logged for reproducibility).
 */
@Test(timeout=10000) public void testFormat() throws Exception {
JobConf job=new JobConf(defaultConf);
Random random=new Random();
long seed=random.nextLong();
LOG.info("seed = " + seed);
random.setSeed(seed);
localFs.delete(workDir,true);
FileInputFormat.setInputPaths(job,workDir);
final int length=10000;
final int numFiles=10;
createFiles(length,numFiles,random);
CombineTextInputFormat format=new CombineTextInputFormat();
LongWritable key=new LongWritable();
Text value=new Text();
for (int i=0; i < 3; i++) {
// Vary the requested split count; the combine format must still return one.
int numSplits=random.nextInt(length / 20) + 1;
LOG.info("splitting: requesting = " + numSplits);
InputSplit[] splits=format.getSplits(job,numSplits);
LOG.info("splitting: got = " + splits.length);
assertEquals("We got more than one splits!",1,splits.length);
InputSplit split=splits[0];
assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass());
// Track which values have been seen; each must appear exactly once.
BitSet bits=new BitSet(length);
LOG.debug("split= " + split);
RecordReader reader=format.getRecordReader(split,job,voidReporter);
try {
int count=0;
while (reader.next(key,value)) {
// Each line's text is its integer value.
int v=Integer.parseInt(value.toString());
LOG.debug("read " + v);
if (bits.get(v)) {
LOG.warn("conflict with " + v + " at position "+ reader.getPos());
}
assertFalse("Key in multiple partitions.",bits.get(v));
bits.set(v);
count++;
}
LOG.info("splits=" + split + " count="+ count);
}
finally {
reader.close();
}
assertEquals("Some keys in no partition.",length,bits.cardinality());
}
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Test using the gzip codec for reading: two gzipped input files must be
 * combined into a single split whose records are the 8 original lines, in
 * either file order.
 */
@Test(timeout=10000)
public void testGzip() throws IOException {
  JobConf job = new JobConf(defaultConf);
  CompressionCodec gzip = new GzipCodec();
  ReflectionUtils.setConf(gzip, job);
  localFs.delete(workDir, true);

  // Two compressed input files: 6 lines and 2 lines respectively.
  writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip,
      "the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n");
  writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
      "this is a test\nof gzip\n");
  FileInputFormat.setInputPaths(job, workDir);

  CombineTextInputFormat format = new CombineTextInputFormat();
  InputSplit[] splits = format.getSplits(job, 100);
  assertEquals("compressed splits == 1", 1, splits.length);

  List results = readSplit(format, splits[0], job);
  assertEquals("splits[0] length", 8, results.size());

  final String[] firstList = {"the quick", "brown", "fox jumped", "over", " the lazy", " dog"};
  final String[] secondList = {"this is a test", "of gzip"};
  String first = results.get(0).toString();
  // The combined split may begin with either file; accept both orders.
  if (first.equals(firstList[0])) {
    testResults(results, firstList, secondList);
  } else if (first.equals(secondList[0])) {
    testResults(results, secondList, firstList);
  } else {
    fail("unexpected first token!");
  }
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test using the raw Inflater codec for reading gzip files.
 *
 * Manually parses the gzip header of the "concat" fixture (magic bytes,
 * compression method, then the FLG-dependent optional fields) and inflates
 * the first member's deflate stream with a raw java.util.zip.Inflater.
 */
@Test public void testPrototypeInflaterGzip() throws IOException {
CompressionCodec gzip=new GzipCodec();
localFs.delete(workDir,true);
System.out.println(COLOR_BR_BLUE + "testPrototypeInflaterGzip() using " + "non-native/Java Inflater and manual gzip header/trailer parsing"+ COLOR_NORMAL);
// Fixture: a concatenation of gzip members, 148 bytes total (asserted below).
final String fn="concat" + gzip.getDefaultExtension();
Path fnLocal=new Path(System.getProperty("test.concat.data","/tmp"),fn);
Path fnHDFS=new Path(workDir,fn);
localFs.copyFromLocalFile(fnLocal,fnHDFS);
final FileInputStream in=new FileInputStream(fnLocal.toString());
assertEquals("concat bytes available",148,in.available());
// Fixed 10-byte gzip header: magic 0x1f 0x8b, method 8 (deflate), FLG, ...
byte[] compressedBuf=new byte[256];
int numBytesRead=in.read(compressedBuf,0,10);
assertEquals("header bytes read",10,numBytesRead);
assertEquals("1st byte",0x1f,compressedBuf[0] & 0xff);
assertEquals("2nd byte",0x8b,compressedBuf[1] & 0xff);
assertEquals("3rd byte (compression method)",8,compressedBuf[2] & 0xff);
byte flags=(byte)(compressedBuf[3] & 0xff);
// FEXTRA (0x04): two-byte little-endian XLEN followed by XLEN bytes to skip.
if ((flags & 0x04) != 0) {
numBytesRead=in.read(compressedBuf,0,2);
assertEquals("XLEN bytes read",2,numBytesRead);
int xlen=((compressedBuf[1] << 8) | compressedBuf[0]) & 0xffff;
in.skip(xlen);
}
// FNAME (0x08): zero-terminated original file name.
if ((flags & 0x08) != 0) {
while ((numBytesRead=in.read()) != 0) {
assertFalse("unexpected end-of-file while reading filename",numBytesRead == -1);
}
}
// FCOMMENT (0x10): zero-terminated comment.
if ((flags & 0x10) != 0) {
while ((numBytesRead=in.read()) != 0) {
assertFalse("unexpected end-of-file while reading comment",numBytesRead == -1);
}
}
// Bits 5-7 of FLG are reserved and must be zero.
if ((flags & 0xe0) != 0) {
assertTrue("reserved bits are set??",(flags & 0xe0) == 0);
}
// FHCRC (0x02): two-byte header CRC16 (read but not validated here).
if ((flags & 0x02) != 0) {
numBytesRead=in.read(compressedBuf,0,2);
assertEquals("CRC16 bytes read",2,numBytesRead);
int crc16=((compressedBuf[1] << 8) | compressedBuf[0]) & 0xffff;
}
// Read the remainder (deflate data + trailer + following members) and
// inflate the first member with a raw Inflater (nowrap=true: no zlib header).
numBytesRead=in.read(compressedBuf);
byte[] uncompressedBuf=new byte[256];
Inflater inflater=new Inflater(true);
inflater.setInput(compressedBuf,0,numBytesRead);
try {
int numBytesUncompressed=inflater.inflate(uncompressedBuf);
String outString=new String(uncompressedBuf,0,numBytesUncompressed,"UTF-8");
System.out.println("uncompressed data of first gzip member = [" + outString + "]");
}
catch ( java.util.zip.DataFormatException ex) {
throw new IOException(ex.getMessage());
}
in.close();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test using the bzip2 codec for reading
 *
 * Reads the multi-member "concat" bzip2 fixture plus a freshly written
 * second file through TextInputFormat and checks line counts and sample
 * lines of both resulting splits.
 */
@Test public void testBzip2() throws IOException {
JobConf jobConf=new JobConf(defaultConf);
CompressionCodec bzip2=new BZip2Codec();
ReflectionUtils.setConf(bzip2,jobConf);
localFs.delete(workDir,true);
System.out.println(COLOR_BR_CYAN + "testBzip2() using non-native CBZip2InputStream (presumably)" + COLOR_NORMAL);
// Copy the multi-member fixture in, and write a small second file.
final String fn="concat" + bzip2.getDefaultExtension();
Path fnLocal=new Path(System.getProperty("test.concat.data","/tmp"),fn);
Path fnHDFS=new Path(workDir,fn);
localFs.copyFromLocalFile(fnLocal,fnHDFS);
writeFile(localFs,new Path(workDir,"part2.txt.bz2"),bzip2,"this is a test\nof bzip2\n");
FileInputFormat.setInputPaths(jobConf,workDir);
TextInputFormat format=new TextInputFormat();
format.configure(jobConf);
format.setMinSplitSize(256);
InputSplit[] splits=format.getSplits(jobConf,100);
assertEquals("compressed splits == 2",2,splits.length);
// Normalize split order so splits[0] is the concat fixture.
FileSplit tmp=(FileSplit)splits[0];
if (tmp.getPath().getName().equals("part2.txt.bz2")) {
splits[0]=splits[1];
splits[1]=tmp;
}
List results=readSplit(format,splits[0],jobConf);
assertEquals("splits[0] num lines",6,results.size());
assertEquals("splits[0][5]","member #3",results.get(5).toString());
results=readSplit(format,splits[1],jobConf);
assertEquals("splits[1] num lines",2,results.size());
assertEquals("splits[1][0]","this is a test",results.get(0).toString());
assertEquals("splits[1][1]","of bzip2",results.get(1).toString());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test using Hadoop's original, native-zlib gzip codec for reading.
 *
 * Skipped when the native (C/C++) zlib libraries are not loaded; otherwise
 * reads the multi-member "concat" fixture plus a freshly written second
 * file through TextInputFormat and checks both splits' contents.
 */
@Test public void testGzip() throws IOException {
  JobConf jobConf = new JobConf(defaultConf);
  CompressionCodec gzip = new GzipCodec();
  ReflectionUtils.setConf(gzip, jobConf);
  localFs.delete(workDir, true);
  // FIX: the two branches were swapped. BuiltInGzipDecompressor is the
  // pure-Java fallback decompressor, i.e. native zlib is NOT loaded, so
  // that case must skip; only the native case proceeds.
  if (org.apache.hadoop.io.compress.zlib.BuiltInGzipDecompressor.class == gzip.getDecompressorType()) {
    LOG.warn("testGzip() skipped: native (C/C++) libs not loaded");
    return;
  }
  System.out.println(COLOR_BR_RED + "testGzip() using native-zlib Decompressor (" + gzip.getDecompressorType() + ")" + COLOR_NORMAL);
  // Copy the multi-member fixture in, and write a small second file.
  final String fn = "concat" + gzip.getDefaultExtension();
  Path fnLocal = new Path(System.getProperty("test.concat.data", "/tmp"), fn);
  Path fnHDFS = new Path(workDir, fn);
  localFs.copyFromLocalFile(fnLocal, fnHDFS);
  writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip, "this is a test\nof gzip\n");
  FileInputFormat.setInputPaths(jobConf, workDir);
  TextInputFormat format = new TextInputFormat();
  format.configure(jobConf);
  InputSplit[] splits = format.getSplits(jobConf, 100);
  assertEquals("compressed splits == 2", 2, splits.length);
  // Normalize split order so splits[0] is the concat fixture.
  FileSplit tmp = (FileSplit) splits[0];
  if (tmp.getPath().getName().equals("part2.txt.gz")) {
    splits[0] = splits[1];
    splits[1] = tmp;
  }
  List results = readSplit(format, splits[0], jobConf);
  assertEquals("splits[0] num lines", 6, results.size());
  assertEquals("splits[0][5]", "member #3", results.get(5).toString());
  results = readSplit(format, splits[1], jobConf);
  assertEquals("splits[1] num lines", 2, results.size());
  assertEquals("splits[1][0]", "this is a test", results.get(0).toString());
  assertEquals("splits[1][1]", "of gzip", results.get(1).toString());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify counter value works: setValue() initializes and later overwrites
 * the value, and increment() accumulates correctly, across many random
 * trials.
 */
@SuppressWarnings("deprecation")
@Test
public void testCounterValue() {
  Counters counters = new Counters();
  final int NUMBER_TESTS = 100;
  final int NUMBER_INC = 10;
  final Random rand = new Random();
  for (int trial = 0; trial < NUMBER_TESTS; trial++) {
    long expected = rand.nextInt();
    Counter counter = counters.findCounter("foo", "bar");
    // setValue() must establish the starting value exactly.
    counter.setValue(expected);
    assertEquals("Counter value is not initialized correctly", expected, counter.getValue());
    // increment() must accumulate on top of the current value.
    for (int step = 0; step < NUMBER_INC; step++) {
      int delta = rand.nextInt();
      counter.increment(delta);
      expected += delta;
      assertEquals("Counter value is not incremented correctly", expected, counter.getValue());
    }
    // A later setValue() must overwrite whatever was accumulated.
    expected = rand.nextInt();
    counter.setValue(expected);
    assertEquals("Counter value is not set correctly", expected, counter.getValue());
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Exercises FadvisedFileRegion.customShuffleTransfer(): transfers the
 * [position, position+count) window of a random file to a target channel
 * in chunks, then verifies the transferred byte count, the target file
 * length, and byte-for-byte equality with the source window.
 */
@Test(timeout=100000) public void testCustomShuffleTransfer() throws IOException {
File absLogDir=new File("target",TestFadvisedFileRegion.class.getSimpleName() + "LocDir").getAbsoluteFile();
String testDirPath=StringUtils.join(Path.SEPARATOR,new String[]{absLogDir.getAbsolutePath(),"testCustomShuffleTransfer"});
File testDir=new File(testDirPath);
testDir.mkdirs();
System.out.println(testDir.getAbsolutePath());
File inFile=new File(testDir,"fileIn.out");
File outFile=new File(testDir,"fileOut.out");
// Fill the input file with FILE_SIZE random bytes.
byte[] initBuff=new byte[FILE_SIZE];
Random rand=new Random();
rand.nextBytes(initBuff);
FileOutputStream out=new FileOutputStream(inFile);
try {
out.write(initBuff);
}
finally {
IOUtils.cleanup(LOG,out);
}
// Transfer window: 2 MiB offset, just under 4 MiB long.
int position=2 * 1024 * 1024;
int count=4 * 1024 * 1024 - 1;
RandomAccessFile inputFile=null;
RandomAccessFile targetFile=null;
WritableByteChannel target=null;
FadvisedFileRegion fileRegion=null;
try {
inputFile=new RandomAccessFile(inFile.getAbsolutePath(),"r");
targetFile=new RandomAccessFile(outFile.getAbsolutePath(),"rw");
target=targetFile.getChannel();
Assert.assertEquals(FILE_SIZE,inputFile.length());
fileRegion=new FadvisedFileRegion(inputFile,position,count,false,0,null,null,1024,false);
customShuffleTransferCornerCases(fileRegion,target,count);
// Pump the region until it reports no more bytes to transfer.
long pos=0;
long size;
while ((size=fileRegion.customShuffleTransfer(target,pos)) > 0) {
pos+=size;
}
Assert.assertEquals(count,(int)pos);
Assert.assertEquals(count,targetFile.length());
}
finally {
if (fileRegion != null) {
fileRegion.releaseExternalResources();
}
IOUtils.cleanup(LOG,target);
IOUtils.cleanup(LOG,targetFile);
IOUtils.cleanup(LOG,inputFile);
}
// Re-read the target and compare it byte-for-byte with the source window.
byte[] buff=new byte[FILE_SIZE];
FileInputStream in=new FileInputStream(outFile);
try {
int total=in.read(buff,0,count);
Assert.assertEquals(count,total);
for (int i=0; i < count; i++) {
Assert.assertEquals(initBuff[position + i],buff[i]);
}
}
finally {
IOUtils.cleanup(LOG,in);
}
// Best-effort cleanup of the scratch files and directories.
inFile.delete();
outFile.delete();
testDir.delete();
absLogDir.delete();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test using the gzip codec with two input files: each gzipped file is
 * read as ten fixed-length (5-byte) records, one split per file.
 */
@Test(timeout=5000)
public void testGzipWithTwoInputs() throws IOException {
  CompressionCodec gzip = new GzipCodec();
  localFs.delete(workDir, true);
  FixedLengthInputFormat format = new FixedLengthInputFormat();
  JobConf job = new JobConf(defaultConf);
  format.setRecordLength(job, 5);
  FileInputFormat.setInputPaths(job, workDir);
  ReflectionUtils.setConf(gzip, job);
  format.configure(job);

  // Two compressed files of fixed-length records.
  writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip,
      "one two threefour five six seveneightnine ten ");
  writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
      "ten nine eightsevensix five four threetwo one ");

  InputSplit[] splits = format.getSplits(job, 100);
  assertEquals("compressed splits == 2", 2, splits.length);
  // Normalize the order so splits[0] corresponds to part1.
  FileSplit tmp = (FileSplit) splits[0];
  if (tmp.getPath().getName().equals("part2.txt.gz")) {
    splits[0] = splits[1];
    splits[1] = tmp;
  }

  List results = readSplit(format, splits[0], job);
  assertEquals("splits[0] length", 10, results.size());
  assertEquals("splits[0][5]", "six ", results.get(5));
  results = readSplit(format, splits[1], job);
  assertEquals("splits[1] length", 10, results.size());
  assertEquals("splits[1][0]", "ten ", results.get(0));
  assertEquals("splits[1][1]", "nine ", results.get(1));
}
APIUtilityVerifier EqualityVerifier PublicFieldVerifier
/**
 * Creates an empty gzip-compressed IFile, opens it with IFile.Reader, and
 * checks that reading through the reader's checksum stream returns exactly
 * the stored checksum's length.
 */
@Test public void testIFileReaderWithCodec() throws Exception {
Configuration conf=new Configuration();
FileSystem localFs=FileSystem.getLocal(conf);
FileSystem rfs=((LocalFileSystem)localFs).getRaw();
Path path=new Path(new Path("build/test.ifile"),"data");
// A GzipCodec assigned to a DefaultCodec reference (it is a subclass).
DefaultCodec codec=new GzipCodec();
codec.setConf(conf);
// Write an empty IFile; close() still emits the file trailer/checksum.
FSDataOutputStream out=rfs.create(path);
IFile.Writer writer=new IFile.Writer(conf,out,Text.class,Text.class,codec,null);
writer.close();
FSDataInputStream in=rfs.open(path);
IFile.Reader reader=new IFile.Reader(conf,in,rfs.getFileStatus(path).getLen(),codec,null);
reader.close();
// NOTE(review): checksumIn is read AFTER reader.close(); this appears to
// be deliberate (only the checksum bytes remain to be read) -- confirm
// against IFile.Reader.close() semantics before changing.
byte[] ab=new byte[100];
int readed=reader.checksumIn.readWithChecksum(ab,0,ab.length);
assertEquals(readed,reader.checksumIn.getChecksum().length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * With ACLs enabled and an empty view ACL, a random user must be denied
 * both view and modify access, while the job owner retains both.
 */
@Test
public void testClusterNoAdmins() {
  Configuration conf = new Configuration();
  String jobOwner = "testuser";
  conf.set(JobACL.VIEW_JOB.getAclName(), "");
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
  String noAdminUser = "testuser2";

  JobACLsManager aclsManager = new JobACLsManager(conf);
  final Map jobACLs = aclsManager.constructJobACLs(conf);

  // A user who is neither owner nor admin gets no access at all.
  UserGroupInformation caller =
      UserGroupInformation.createUserForTesting(noAdminUser, new String[]{});
  boolean allowed = aclsManager.checkAccess(caller, JobACL.VIEW_JOB, jobOwner,
      jobACLs.get(JobACL.VIEW_JOB));
  assertFalse("random user should not have view access", allowed);
  allowed = aclsManager.checkAccess(caller, JobACL.MODIFY_JOB, jobOwner,
      jobACLs.get(JobACL.MODIFY_JOB));
  assertFalse("random user should not have modify access", allowed);

  // The owner always has full access to their own job.
  caller = UserGroupInformation.createUserForTesting(jobOwner, new String[]{});
  allowed = aclsManager.checkAccess(caller, JobACL.VIEW_JOB, jobOwner,
      jobACLs.get(JobACL.VIEW_JOB));
  assertTrue("owner should have view access", allowed);
  allowed = aclsManager.checkAccess(caller, JobACL.MODIFY_JOB, jobOwner,
      jobACLs.get(JobACL.MODIFY_JOB));
  assertTrue("owner should have modify access", allowed);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A user listed in MRConfig.MR_ADMINS is a cluster admin and receives both
 * view and modify access to another user's job, even when the job ACLs
 * name only the owner.
 */
@Test
public void testClusterAdmins() {
  Configuration conf = new Configuration();
  String jobOwner = "testuser";
  conf.set(JobACL.VIEW_JOB.getAclName(), jobOwner);
  conf.set(JobACL.MODIFY_JOB.getAclName(), jobOwner);
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
  String clusterAdmin = "testuser2";
  conf.set(MRConfig.MR_ADMINS, clusterAdmin);

  JobACLsManager aclsManager = new JobACLsManager(conf);
  final Map jobACLs = aclsManager.constructJobACLs(conf);

  UserGroupInformation admin =
      UserGroupInformation.createUserForTesting(clusterAdmin, new String[]{});
  boolean allowed = aclsManager.checkAccess(admin, JobACL.VIEW_JOB, jobOwner,
      jobACLs.get(JobACL.VIEW_JOB));
  assertTrue("cluster admin should have view access", allowed);
  allowed = aclsManager.checkAccess(admin, JobACL.MODIFY_JOB, jobOwner,
      jobACLs.get(JobACL.MODIFY_JOB));
  assertTrue("cluster admin should have modify access", allowed);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * When MR ACLs are disabled, any user -- even one absent from every ACL --
 * is granted access.
 */
@Test
public void testAclsOff() {
  Configuration conf = new Configuration();
  String jobOwner = "testuser";
  conf.set(JobACL.VIEW_JOB.getAclName(), jobOwner);
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED, false);
  String noAdminUser = "testuser2";

  JobACLsManager aclsManager = new JobACLsManager(conf);
  final Map jobACLs = aclsManager.constructJobACLs(conf);

  UserGroupInformation caller =
      UserGroupInformation.createUserForTesting(noAdminUser, new String[]{});
  boolean allowed = aclsManager.checkAccess(caller, JobACL.VIEW_JOB, jobOwner,
      jobACLs.get(JobACL.VIEW_JOB));
  assertTrue("acls off so anyone should have access", allowed);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Admin-group membership: a user whose group appears in the group portion
 * of MRConfig.MR_ADMINS (after the space separator) gets view access.
 */
@Test
public void testGroups() {
  Configuration conf = new Configuration();
  String jobOwner = "testuser";
  conf.set(JobACL.VIEW_JOB.getAclName(), jobOwner);
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
  String user = "testuser2";
  String adminGroup = "adminGroup";
  // ACL syntax is "<users> <groups>": empty user list, one admin group.
  conf.set(MRConfig.MR_ADMINS, " " + adminGroup);

  JobACLsManager aclsManager = new JobACLsManager(conf);
  final Map jobACLs = aclsManager.constructJobACLs(conf);

  UserGroupInformation member =
      UserGroupInformation.createUserForTesting(user, new String[]{adminGroup});
  boolean allowed = aclsManager.checkAccess(member, JobACL.VIEW_JOB, jobOwner,
      jobACLs.get(JobACL.VIEW_JOB));
  assertTrue("user in admin group should have access", allowed);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies {@link JobClient#isJobDirValid}: a directory is only a valid job
 * dir once it contains both {@code job.xml} and {@code job.split}.
 * @throws IOException on local filesystem errors
 */
@Test(timeout=10000) public void testIsJobDirValid() throws IOException {
Configuration conf=new Configuration();
FileSystem fs=FileSystem.getLocal(conf);
Path testDir=new Path(TEST_DIR);
fs.mkdirs(testDir);
// Empty directory lacks the required files, so it must be invalid.
Assert.assertFalse(JobClient.isJobDirValid(testDir,fs));
Path jobconf=new Path(testDir,"job.xml");
Path jobsplit=new Path(testDir,"job.split");
// fs.create returns an open FSDataOutputStream; close it immediately so the
// streams are not leaked (the original left both unclosed).
fs.create(jobconf).close();
fs.create(jobsplit).close();
Assert.assertTrue(JobClient.isJobDirValid(testDir,fs));
fs.delete(jobconf,true);
fs.delete(jobsplit,true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests {@link TaskCounter}'s {@link TaskCounter.COMMITTED_HEAP_BYTES}.
 * The test consists of running a low-memory job which consumes less heap
 * memory and then running a high-memory job which consumes more heap memory,
 * and then ensuring that COMMITTED_HEAP_BYTES of low-memory job is smaller
 * than that of the high-memory job.
 * @throws IOException
 */
@Test @SuppressWarnings("deprecation") public void testHeapUsageCounter() throws Exception {
JobConf conf=new JobConf();
FileSystem fileSystem=FileSystem.getLocal(conf);
Path rootDir=new Path(System.getProperty("test.build.data","/tmp"));
Path testRootDir=new Path(rootDir,"testHeapUsageCounter");
// Start from a clean slate: remove leftovers from any previous run.
fileSystem.delete(testRootDir,true);
fileSystem.setWorkingDirectory(testRootDir);
fileSystem.deleteOnExit(testRootDir);
MiniMRCluster mrCluster=new MiniMRCluster(1,fileSystem.getUri().toString(),1);
try {
conf=mrCluster.createJobConf();
JobClient jobClient=new JobClient(conf);
Path inDir=new Path(testRootDir,"in");
createWordsFile(inDir,conf);
// Job 1: baseline heap settings (no extra heap requested).
RunningJob lowMemJob=runHeapUsageTestJob(conf,testRootDir,"-Xms32m -Xmx1G",0,0,fileSystem,jobClient,inDir);
JobID lowMemJobID=lowMemJob.getID();
long lowMemJobMapHeapUsage=getTaskCounterUsage(jobClient,lowMemJobID,1,0,TaskType.MAP);
System.out.println("Job1 (low memory job) map task heap usage: " + lowMemJobMapHeapUsage);
long lowMemJobReduceHeapUsage=getTaskCounterUsage(jobClient,lowMemJobID,1,0,TaskType.REDUCE);
System.out.println("Job1 (low memory job) reduce task heap usage: " + lowMemJobReduceHeapUsage);
// Job 2: ask each task to consume 256MB more heap than job 1 reported.
RunningJob highMemJob=runHeapUsageTestJob(conf,testRootDir,"-Xms32m -Xmx1G",lowMemJobMapHeapUsage + 256 * 1024 * 1024,lowMemJobReduceHeapUsage + 256 * 1024 * 1024,fileSystem,jobClient,inDir);
JobID highMemJobID=highMemJob.getID();
long highMemJobMapHeapUsage=getTaskCounterUsage(jobClient,highMemJobID,1,0,TaskType.MAP);
System.out.println("Job2 (high memory job) map task heap usage: " + highMemJobMapHeapUsage);
long highMemJobReduceHeapUsage=getTaskCounterUsage(jobClient,highMemJobID,1,0,TaskType.REDUCE);
System.out.println("Job2 (high memory job) reduce task heap usage: " + highMemJobReduceHeapUsage);
// The committed-heap counter must reflect the deliberate difference.
assertTrue("Incorrect map heap usage reported by the map task",lowMemJobMapHeapUsage < highMemJobMapHeapUsage);
assertTrue("Incorrect reduce heap usage reported by the reduce task",lowMemJobReduceHeapUsage < highMemJobReduceHeapUsage);
}
finally {
mrCluster.shutdown();
try {
fileSystem.delete(testRootDir,true);
}
catch ( IOException ioe) {
// Best-effort cleanup; deletion failures during teardown are ignored.
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that a downgraded {@link TaskID} renders the expected string form
 * through both the deprecated {@code getTaskId()} and {@code getTaskID()}
 * accessors of {@code TaskReport}.
 */
@Test(timeout=5000) public void testTaskID() throws IOException, InterruptedException {
JobID jobid=new JobID("1014873536921",6);
TaskID tid=new TaskID(jobid,TaskType.MAP,0);
org.apache.hadoop.mapred.TaskID tid1=org.apache.hadoop.mapred.TaskID.downgrade(tid);
org.apache.hadoop.mapred.TaskReport treport=new org.apache.hadoop.mapred.TaskReport(tid1,0.0f,State.FAILED.toString(),null,TIPStatus.FAILED,100,100,new org.apache.hadoop.mapred.Counters());
// JUnit convention: expected value first, actual second (was reversed).
Assert.assertEquals("task_1014873536921_0006_m_000000",treport.getTaskId());
Assert.assertEquals("task_1014873536921_0006_m_000000",treport.getTaskID().toString());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Runs a small word-count job through the new (org.apache.hadoop.mapreduce)
 * API in local mode and checks the exact counted output.
 * @throws Exception if job setup or execution fails
 */
@Test public void testNewApis() throws Exception {
Random r=new Random(System.currentTimeMillis());
// Randomized base dir so concurrent/repeated runs do not collide.
Path tmpBaseDir=new Path("/tmp/wc-" + r.nextInt());
final Path inDir=new Path(tmpBaseDir,"input");
final Path outDir=new Path(tmpBaseDir,"output");
String input="The quick brown fox\nhas many silly\nred fox sox\n";
FileSystem inFs=inDir.getFileSystem(conf);
FileSystem outFs=outDir.getFileSystem(conf);
outFs.delete(outDir,true);
if (!inFs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
{
DataOutputStream file=inFs.create(new Path(inDir,"part-0"));
file.writeBytes(input);
file.close();
}
Job job=Job.getInstance(conf,"word count");
job.setJarByClass(TestLocalModeWithNewApis.class);
job.setMapperClass(TokenizerMapper.class);
job.setCombinerClass(IntSumReducer.class);
job.setReducerClass(IntSumReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
FileInputFormat.addInputPath(job,inDir);
FileOutputFormat.setOutputPath(job,outDir);
// Expected value first per the JUnit convention (was reversed).
assertEquals(true,job.waitForCompletion(true));
String output=readOutput(outDir,conf);
assertEquals("The\t1\nbrown\t1\nfox\t2\nhas\t1\nmany\t1\n" + "quick\t1\nred\t1\nsilly\t1\nsox\t1\n",output);
outFs.delete(tmpBaseDir,true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises the deprecated {@link DistributedCache} static helpers end to
 * end — local archives/files (add appends, set replaces), archive/file
 * timestamps, symlink handling, and cache archive/file URIs — checking the
 * backing {@link Configuration} keys after each mutation.
 */
@Test(timeout=1000) public void testDeprecatedFunctions() throws Exception {
// --- local archives: addLocalArchives appends, setLocalArchives replaces ---
DistributedCache.addLocalArchives(conf,"Test Local Archives 1");
Assert.assertEquals("Test Local Archives 1",conf.get(DistributedCache.CACHE_LOCALARCHIVES));
Assert.assertEquals(1,DistributedCache.getLocalCacheArchives(conf).length);
Assert.assertEquals("Test Local Archives 1",DistributedCache.getLocalCacheArchives(conf)[0].getName());
DistributedCache.addLocalArchives(conf,"Test Local Archives 2");
Assert.assertEquals("Test Local Archives 1,Test Local Archives 2",conf.get(DistributedCache.CACHE_LOCALARCHIVES));
Assert.assertEquals(2,DistributedCache.getLocalCacheArchives(conf).length);
Assert.assertEquals("Test Local Archives 2",DistributedCache.getLocalCacheArchives(conf)[1].getName());
DistributedCache.setLocalArchives(conf,"Test Local Archives 3");
Assert.assertEquals("Test Local Archives 3",conf.get(DistributedCache.CACHE_LOCALARCHIVES));
Assert.assertEquals(1,DistributedCache.getLocalCacheArchives(conf).length);
Assert.assertEquals("Test Local Archives 3",DistributedCache.getLocalCacheArchives(conf)[0].getName());
// --- local files: same add-appends / set-replaces contract ---
DistributedCache.addLocalFiles(conf,"Test Local Files 1");
Assert.assertEquals("Test Local Files 1",conf.get(DistributedCache.CACHE_LOCALFILES));
Assert.assertEquals(1,DistributedCache.getLocalCacheFiles(conf).length);
Assert.assertEquals("Test Local Files 1",DistributedCache.getLocalCacheFiles(conf)[0].getName());
DistributedCache.addLocalFiles(conf,"Test Local Files 2");
Assert.assertEquals("Test Local Files 1,Test Local Files 2",conf.get(DistributedCache.CACHE_LOCALFILES));
Assert.assertEquals(2,DistributedCache.getLocalCacheFiles(conf).length);
Assert.assertEquals("Test Local Files 2",DistributedCache.getLocalCacheFiles(conf)[1].getName());
DistributedCache.setLocalFiles(conf,"Test Local Files 3");
Assert.assertEquals("Test Local Files 3",conf.get(DistributedCache.CACHE_LOCALFILES));
Assert.assertEquals(1,DistributedCache.getLocalCacheFiles(conf).length);
Assert.assertEquals("Test Local Files 3",DistributedCache.getLocalCacheFiles(conf)[0].getName());
// --- timestamps: stored as long-valued config entries ---
DistributedCache.setArchiveTimestamps(conf,"1234567890");
Assert.assertEquals(1234567890,conf.getLong(DistributedCache.CACHE_ARCHIVES_TIMESTAMPS,0));
Assert.assertEquals(1,DistributedCache.getArchiveTimestamps(conf).length);
Assert.assertEquals(1234567890,DistributedCache.getArchiveTimestamps(conf)[0]);
DistributedCache.setFileTimestamps(conf,"1234567890");
Assert.assertEquals(1234567890,conf.getLong(DistributedCache.CACHE_FILES_TIMESTAMPS,0));
Assert.assertEquals(1,DistributedCache.getFileTimestamps(conf).length);
Assert.assertEquals(1234567890,DistributedCache.getFileTimestamps(conf)[0]);
// --- symlinks: createAllSymlink is a no-op on conf; getSymlink stays true ---
DistributedCache.createAllSymlink(conf,new File("Test Job Cache Dir"),new File("Test Work Dir"));
Assert.assertNull(conf.get(DistributedCache.CACHE_SYMLINK));
Assert.assertTrue(DistributedCache.getSymlink(conf));
// getFileStatus/getTimestamp need a real file; create and remove one.
Assert.assertTrue(symlinkFile.createNewFile());
FileStatus fileStatus=DistributedCache.getFileStatus(conf,symlinkFile.toURI());
Assert.assertNotNull(fileStatus);
Assert.assertEquals(fileStatus.getModificationTime(),DistributedCache.getTimestamp(conf,symlinkFile.toURI()));
Assert.assertTrue(symlinkFile.delete());
// --- cache archive/file URIs round-trip through the configuration ---
DistributedCache.addCacheArchive(symlinkFile.toURI(),conf);
Assert.assertEquals(symlinkFile.toURI().toString(),conf.get(DistributedCache.CACHE_ARCHIVES));
Assert.assertEquals(1,DistributedCache.getCacheArchives(conf).length);
Assert.assertEquals(symlinkFile.toURI(),DistributedCache.getCacheArchives(conf)[0]);
DistributedCache.addCacheFile(symlinkFile.toURI(),conf);
Assert.assertEquals(symlinkFile.toURI().toString(),conf.get(DistributedCache.CACHE_FILES));
Assert.assertEquals(1,DistributedCache.getCacheFiles(conf).length);
Assert.assertEquals(symlinkFile.toURI(),DistributedCache.getCacheFiles(conf)[0]);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Tests {@code Master.getMasterAddress} hostname resolution under the
 * default configuration, the classic framework (including an unparseable
 * master address that must throw), and the YARN framework (RM address).
 */
@Test public void testGetMasterAddress(){
YarnConfiguration conf=new YarnConfiguration();
// Default configuration falls back to the default RM address.
String masterHostname=Master.getMasterAddress(conf).getHostName();
InetSocketAddress rmAddr=NetUtils.createSocketAddr(YarnConfiguration.DEFAULT_RM_ADDRESS);
// Expected value first per the JUnit convention (was reversed).
assertEquals(rmAddr.getHostName(),masterHostname);
// Classic framework with a malformed master address must fail to resolve.
conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.CLASSIC_FRAMEWORK_NAME);
conf.set(MRConfig.MASTER_ADDRESS,"local:invalid");
try {
Master.getMasterAddress(conf);
fail("Should not reach here as there is a bad master address");
}
catch ( Exception e) {
// Expected: the bad master address cannot be parsed.
}
conf.set(MRConfig.MASTER_ADDRESS,"bar.com:8042");
masterHostname=Master.getMasterAddress(conf).getHostName();
assertEquals("bar.com",masterHostname);
// YARN framework: the hostname comes from the RM address instead.
conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.YARN_FRAMEWORK_NAME);
conf.set(YarnConfiguration.RM_ADDRESS,"foo1.com:8192");
masterHostname=Master.getMasterAddress(conf).getHostName();
assertEquals("foo1.com",masterHostname);
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * To test OS dependent setting of default execution path for a MapRed task.
 * Mainly that we can use MRJobConfig.DEFAULT_MAPRED_ADMIN_USER_ENV to set -
 * for WINDOWS: %HADOOP_COMMON_HOME%\bin is expected to be included in PATH - for
 * Linux: $HADOOP_COMMON_HOME/lib/native is expected to be included in
 * LD_LIBRARY_PATH
 */
@Test public void testMapRedExecutionEnv(){
// Part 1: verify the admin-user env expansion includes the hadoop lib dir.
try {
Map environment=new HashMap();
String setupHadoopHomeCommand=Shell.WINDOWS ? "HADOOP_COMMON_HOME=C:\\fake\\PATH\\to\\hadoop\\common\\home" : "HADOOP_COMMON_HOME=/fake/path/to/hadoop/common/home";
MRApps.setEnvFromInputString(environment,setupHadoopHomeCommand,conf);
MRApps.setEnvFromInputString(environment,conf.get(MRJobConfig.MAPRED_ADMIN_USER_ENV,MRJobConfig.DEFAULT_MAPRED_ADMIN_USER_ENV),conf);
String executionPaths=environment.get(Shell.WINDOWS ? "PATH" : "LD_LIBRARY_PATH");
String toFind=Shell.WINDOWS ? "C:\\fake\\PATH\\to\\hadoop\\common\\home\\bin" : "/fake/path/to/hadoop/common/home/lib/native";
assertTrue("execution path does not include the hadoop lib location " + toFind,executionPaths.contains(toFind));
}
catch ( Exception e) {
e.printStackTrace();
// Clean up BEFORE failing: fail() throws AssertionError, so the original
// tearDown() call placed after it was unreachable and never ran.
tearDown();
fail("Exception in testing execution environment for MapReduce task");
}
// Part 2: run a real job and have the mapper verify the env propagation.
try {
JobConf conf=new JobConf(mr.getConfig());
Path inDir=new Path("input");
Path outDir=new Path("output");
String input="The input";
configure(conf,inDir,outDir,input,ExecutionEnvCheckMapClass.class,IdentityReducer.class);
launchTest(conf,inDir,outDir,input);
}
catch ( Exception e) {
e.printStackTrace();
// Same fix as above: tearDown must precede fail() to actually execute.
tearDown();
fail("Exception in testing propagation of env setting to child task");
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Runs the external-writable word-count job against a mini DFS + MR cluster
 * and checks the exact tab-separated counted output.
 * @throws IOException on cluster or job failure
 */
@Test public void testExternalWritable() throws IOException {
MiniDFSCluster dfsCluster=null;
MiniMRCluster mrCluster=null;
try {
  Configuration configuration=new Configuration();
  dfsCluster=new MiniDFSCluster.Builder(configuration).build();
  FileSystem fileSystem=dfsCluster.getFileSystem();
  // 4 task trackers, 3 local dirs each, backed by the mini DFS namenode.
  mrCluster=new MiniMRCluster(4,fileSystem.getUri().toString(),3);
  JobConf jobConf=mrCluster.createJobConf();
  String result=launchExternal(fileSystem.getUri(),jobConf,"Dennis was here!\nDennis again!",3,1);
  Assert.assertEquals("Dennis again!\t1\nDennis was here!\t1\n",result);
}
finally {
  // Shut down DFS first, then MR, mirroring the startup dependencies.
  if (dfsCluster != null) {
    dfsCluster.shutdown();
  }
  if (mrCluster != null) {
    mrCluster.shutdown();
  }
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Runs a word-count job on a mini DFS + MR cluster to exercise the task
 * classpath setup, and checks the exact counted output.
 * @throws IOException on cluster or job failure
 */
@Test public void testClassPath() throws IOException {
String namenode=null;
MiniDFSCluster dfs=null;
MiniMRCluster mr=null;
FileSystem fileSys=null;
try {
final int taskTrackers=4;
// (removed unused local jobTrackerPort=60050 — it was never referenced)
Configuration conf=new Configuration();
dfs=new MiniDFSCluster.Builder(conf).build();
fileSys=dfs.getFileSystem();
namenode=fileSys.getUri().toString();
mr=new MiniMRCluster(taskTrackers,namenode,3);
JobConf jobConf=mr.createJobConf();
String result;
result=launchWordCount(fileSys.getUri(),jobConf,"The quick brown fox\nhas many silly\n" + "red fox sox\n",3,1);
Assert.assertEquals("The\t1\nbrown\t1\nfox\t2\nhas\t1\nmany\t1\n" + "quick\t1\nred\t1\nsilly\t1\nsox\t1\n",result);
}
finally {
if (dfs != null) {
dfs.shutdown();
}
if (mr != null) {
mr.shutdown();
}
}
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Runs a local job with a combiner and a custom combiner-key grouping
 * comparator, then checks that the combiner actually reduced the record
 * count and that the grouped output matches the expected aggregates.
 */
@Test public void testCombiner() throws Exception {
if (!new File(TEST_ROOT_DIR).mkdirs()) {
throw new RuntimeException("Could not create test dir: " + TEST_ROOT_DIR);
}
File in=new File(TEST_ROOT_DIR,"input");
if (!in.mkdirs()) {
throw new RuntimeException("Could not create test dir: " + in);
}
File out=new File(TEST_ROOT_DIR,"output");
// Input records: key "X|y" with a numeric value; grouping is by the part
// before '|', so A-rows and B-rows each collapse to one group.
PrintWriter pw=new PrintWriter(new FileWriter(new File(in,"data.txt")));
pw.println("A|a,1");
pw.println("A|b,2");
pw.println("B|a,3");
pw.println("B|b,4");
pw.println("B|c,5");
pw.close();
JobConf job=new JobConf();
job.set("mapreduce.framework.name","local");
TextInputFormat.setInputPaths(job,new Path(in.getPath()));
TextOutputFormat.setOutputPath(job,new Path(out.getPath()));
job.setMapperClass(Map.class);
job.setReducerClass(Reduce.class);
job.setInputFormat(TextInputFormat.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(LongWritable.class);
job.setOutputFormat(TextOutputFormat.class);
job.setOutputValueGroupingComparator(GroupComparator.class);
job.setCombinerClass(Combiner.class);
// Group combiner input with the same comparator used for reduce grouping.
job.setCombinerKeyGroupingComparator(GroupComparator.class);
// Force the combiner to run even with few spills.
job.setInt("min.num.spills.for.combine",0);
JobClient client=new JobClient(job);
RunningJob runningJob=client.submitJob(job);
runningJob.waitForCompletion();
if (runningJob.isSuccessful()) {
Counters counters=runningJob.getCounters();
long combinerInputRecords=counters.getGroup("org.apache.hadoop.mapreduce.TaskCounter").getCounter("COMBINE_INPUT_RECORDS");
long combinerOutputRecords=counters.getGroup("org.apache.hadoop.mapreduce.TaskCounter").getCounter("COMBINE_OUTPUT_RECORDS");
// The combiner ran and actually collapsed records.
Assert.assertTrue(combinerInputRecords > 0);
Assert.assertTrue(combinerInputRecords > combinerOutputRecords);
// Exactly two output lines are expected; read key prefix + value digit.
BufferedReader br=new BufferedReader(new FileReader(new File(out,"part-00000")));
Set output=new HashSet();
String line=br.readLine();
Assert.assertNotNull(line);
output.add(line.substring(0,1) + line.substring(4,5));
line=br.readLine();
Assert.assertNotNull(line);
output.add(line.substring(0,1) + line.substring(4,5));
line=br.readLine();
Assert.assertNull(line);
br.close();
Set expected=new HashSet();
expected.add("A2");
expected.add("B5");
Assert.assertEquals(expected,output);
}
else {
Assert.fail("Job failed");
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * test deprecated methods of TaskCompletionEvent: the downgrade path plus the
 * deprecated setters/getters for task id, status, run time and event id.
 */
@SuppressWarnings("deprecation") @Test(timeout=5000) public void testTaskCompletionEvent(){
TaskAttemptID taid=new TaskAttemptID("001",1,TaskType.REDUCE,2,3);
TaskCompletionEvent template=new TaskCompletionEvent(12,taid,13,true,Status.SUCCEEDED,"httptracker");
TaskCompletionEvent testEl=TaskCompletionEvent.downgrade(template);
testEl.setTaskAttemptId(taid);
testEl.setTaskTrackerHttp("httpTracker");
// The deprecated string setter parses and re-renders the attempt id.
testEl.setTaskId("attempt_001_0001_m_000002_04");
assertEquals("attempt_001_0001_m_000002_4",testEl.getTaskId());
testEl.setTaskStatus(Status.OBSOLETE);
assertEquals(Status.OBSOLETE.toString(),testEl.getStatus().toString());
testEl.setTaskRunTime(20);
// Expected value first per the JUnit convention (was reversed).
assertEquals(20,testEl.getTaskRunTime());
testEl.setEventId(16);
assertEquals(16,testEl.getEventId());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * test deprecated methods of TaskID: boolean-flag constructors, write/read
 * round-trip, and the deprecated pattern helpers.
 * @throws IOException
 */
@SuppressWarnings("deprecation") @Test(timeout=5000) public void testDepricatedMethods() throws IOException {
JobID jid=new JobID();
// boolean flag true => MAP, false => REDUCE in the deprecated constructors.
TaskID test=new TaskID(jid,true,1);
// Expected value first per the JUnit convention (was reversed throughout).
assertEquals(TaskType.MAP,test.getTaskType());
test=new TaskID(jid,false,1);
assertEquals(TaskType.REDUCE,test.getTaskType());
test=new TaskID("001",1,false,1);
assertEquals(TaskType.REDUCE,test.getTaskType());
test=new TaskID("001",1,true,1);
assertEquals(TaskType.MAP,test.getTaskType());
// Serialization round-trip must preserve the rendered id.
ByteArrayOutputStream out=new ByteArrayOutputStream();
test.write(new DataOutputStream(out));
TaskID ti=TaskID.read(new DataInputStream(new ByteArrayInputStream(out.toByteArray())));
assertEquals(test.toString(),ti.toString());
assertEquals("task_001_0001_m_000002",TaskID.getTaskIDsPattern("001",1,true,2));
assertEquals("task_003_0001_m_000004",TaskID.getTaskIDsPattern("003",1,TaskType.MAP,4));
assertEquals("003_0001_m_000004",TaskID.getTaskIDsPatternWOPrefix("003",1,TaskType.MAP,4).toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * test QueueManager
 * configuration from file
 * @throws IOException
 */
@Test(timeout=5000) public void testQueue() throws IOException {
File f=null;
try {
// writeFile() produces a queue config with two leaf queues: first, second.
f=writeFile();
QueueManager manager=new QueueManager(f.getCanonicalPath(),true);
manager.setSchedulerInfo("first","queueInfo");
manager.setSchedulerInfo("second","queueInfoqueueInfo");
Queue root=manager.getRoot();
assertTrue(root.getChildren().size() == 2);
Iterator iterator=root.getChildren().iterator();
Queue firstSubQueue=iterator.next();
assertTrue(firstSubQueue.getName().equals("first"));
assertEquals(firstSubQueue.getAcls().get("mapred.queue.first.acl-submit-job").toString(),"Users [user1, user2] and members of the groups [group1, group2] are allowed");
Queue secondSubQueue=iterator.next();
assertTrue(secondSubQueue.getName().equals("second"));
assertEquals(secondSubQueue.getProperties().getProperty("key"),"value");
assertEquals(secondSubQueue.getProperties().getProperty("key1"),"value1");
assertEquals(firstSubQueue.getState().getStateName(),"running");
assertEquals(secondSubQueue.getState().getStateName(),"stopped");
Set template=new HashSet();
template.add("first");
template.add("second");
assertEquals(manager.getLeafQueueNames(),template);
// ACL checks: user1/group1 may submit to "first" but not administer it,
// and may not submit to "second"; user3 is a "first" administrator.
UserGroupInformation mockUGI=mock(UserGroupInformation.class);
when(mockUGI.getShortUserName()).thenReturn("user1");
String[] groups={"group1"};
when(mockUGI.getGroupNames()).thenReturn(groups);
assertTrue(manager.hasAccess("first",QueueACL.SUBMIT_JOB,mockUGI));
assertFalse(manager.hasAccess("second",QueueACL.SUBMIT_JOB,mockUGI));
assertFalse(manager.hasAccess("first",QueueACL.ADMINISTER_JOBS,mockUGI));
when(mockUGI.getShortUserName()).thenReturn("user3");
assertTrue(manager.hasAccess("first",QueueACL.ADMINISTER_JOBS,mockUGI));
QueueAclsInfo[] qai=manager.getQueueAcls(mockUGI);
assertEquals(qai.length,1);
// Refresh must keep queue identity, state and scheduling info intact.
manager.refreshQueues(getConfiguration(),null);
iterator=root.getChildren().iterator();
Queue firstSubQueue1=iterator.next();
Queue secondSubQueue1=iterator.next();
assertTrue(firstSubQueue.equals(firstSubQueue1));
assertEquals(firstSubQueue1.getState().getStateName(),"running");
assertEquals(secondSubQueue1.getState().getStateName(),"stopped");
assertEquals(firstSubQueue1.getSchedulingInfo(),"queueInfo");
assertEquals(secondSubQueue1.getSchedulingInfo(),"queueInfoqueueInfo");
// JobQueueInfo views expose the same name/state/scheduling data.
assertEquals(firstSubQueue.getJobQueueInfo().getQueueName(),"first");
assertEquals(firstSubQueue.getJobQueueInfo().getQueueState(),"running");
assertEquals(firstSubQueue.getJobQueueInfo().getSchedulingInfo(),"queueInfo");
assertEquals(secondSubQueue.getJobQueueInfo().getChildren().size(),0);
assertEquals(manager.getSchedulerInfo("first"),"queueInfo");
// getJobQueueInfos must cover exactly the root's children.
Set queueJobQueueInfos=new HashSet();
for ( JobQueueInfo jobInfo : manager.getJobQueueInfos()) {
queueJobQueueInfos.add(jobInfo.getQueueName());
}
Set rootJobQueueInfos=new HashSet();
for ( Queue queue : root.getChildren()) {
rootJobQueueInfos.add(queue.getJobQueueInfo().getQueueName());
}
assertEquals(queueJobQueueInfos,rootJobQueueInfos);
assertEquals(manager.getJobQueueInfoMapping().get("first").getQueueName(),"first");
// dumpConfiguration from file: JSON contains the "first" queue definition.
Writer writer=new StringWriter();
Configuration conf=getConfiguration();
conf.unset(DeprecatedQueueConfigurationParser.MAPRED_QUEUE_NAMES_KEY);
QueueManager.dumpConfiguration(writer,f.getAbsolutePath(),conf);
String result=writer.toString();
assertTrue(result.indexOf("\"name\":\"first\",\"state\":\"running\",\"acl_submit_job\":\"user1,user2 group1,group2\",\"acl_administer_jobs\":\"user3,user4 group3,group4\",\"properties\":[],\"children\":[]") > 0);
// dumpConfiguration from conf: full expected JSON document.
writer=new StringWriter();
QueueManager.dumpConfiguration(writer,conf);
result=writer.toString();
assertEquals("{\"queues\":[{\"name\":\"default\",\"state\":\"running\",\"acl_submit_job\":\"*\",\"acl_administer_jobs\":\"*\",\"properties\":[],\"children\":[]},{\"name\":\"q1\",\"state\":\"running\",\"acl_submit_job\":\" \",\"acl_administer_jobs\":\" \",\"properties\":[],\"children\":[{\"name\":\"q1:q2\",\"state\":\"running\",\"acl_submit_job\":\" \",\"acl_administer_jobs\":\" \",\"properties\":[{\"key\":\"capacity\",\"value\":\"20\"},{\"key\":\"user-limit\",\"value\":\"30\"}],\"children\":[]}]}]}",result);
// A freshly constructed QueueAclsInfo has no queue name yet.
QueueAclsInfo qi=new QueueAclsInfo();
assertNull(qi.getQueueName());
}
finally {
if (f != null) {
f.delete();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * test for Qmanager with empty configuration
 * @throws IOException
 */
@Test(timeout=5000) public void test2Queue() throws IOException {
Configuration conf=getConfiguration();
QueueManager manager=new QueueManager(conf);
manager.setSchedulerInfo("first","queueInfo");
manager.setSchedulerInfo("second","queueInfoqueueInfo");
Queue root=manager.getRoot();
// assertEquals reports both values on failure, unlike assertTrue(a == b)
// or assertTrue(x.equals(y)); also puts the expected value first.
assertEquals(2,root.getChildren().size());
Iterator iterator=root.getChildren().iterator();
Queue firstSubQueue=iterator.next();
assertEquals("first",firstSubQueue.getName());
assertEquals("Users [user1, user2] and members of the groups [group1, group2] are allowed",firstSubQueue.getAcls().get("mapred.queue.first.acl-submit-job").toString());
Queue secondSubQueue=iterator.next();
assertEquals("second",secondSubQueue.getName());
assertEquals("running",firstSubQueue.getState().getStateName());
assertEquals("stopped",secondSubQueue.getState().getStateName());
assertTrue(manager.isRunning("first"));
assertFalse(manager.isRunning("second"));
assertEquals("queueInfo",firstSubQueue.getSchedulingInfo());
assertEquals("queueInfoqueueInfo",secondSubQueue.getSchedulingInfo());
Set template=new HashSet();
template.add("first");
template.add("second");
assertEquals(template,manager.getLeafQueueNames());
}
APIUtilityVerifier BooleanVerifier
/**
 * Builds a small queue hierarchy (root with two children), serializes it to
 * XML via {@code QueueConfigurationParser.getQueueElement} and verifies the
 * rendered text content.
 * @throws ParserConfigurationException
 * @throws Exception
 */
@Test(timeout=5000) public void testQueueConfigurationParser() throws ParserConfigurationException, Exception {
  JobQueueInfo rootInfo=new JobQueueInfo("root","rootInfo");
  rootInfo.addChild(new JobQueueInfo("child1","child1Info"));
  rootInfo.addChild(new JobQueueInfo("child2","child1Info"));
  DocumentBuilder documentBuilder=DocumentBuilderFactory.newInstance().newDocumentBuilder();
  Document document=documentBuilder.newDocument();
  Element queueElement=QueueConfigurationParser.getQueueElement(document,rootInfo);
  StringWriter rendered=new StringWriter();
  Transformer transformer=TransformerFactory.newInstance().newTransformer();
  transformer.transform(new DOMSource(queueElement),new StreamResult(rendered));
  // Each queue is rendered as "<name> <state>" with a default running state.
  assertTrue(rendered.toString().endsWith("root running child1 running child2 running "));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Runs a tiny map-only job whose mapper ({@code StatusLimitMapper})
 * exercises the task-status length limit, and asserts the job succeeds.
 */
@Test public void testStatusLimit() throws IOException, InterruptedException, ClassNotFoundException {
  Configuration configuration=new Configuration();
  Path baseDir=new Path(testRootTempDir,"testStatusLimit");
  Path inputDir=new Path(baseDir,"in");
  Path outputDir=new Path(baseDir,"out");
  FileSystem fs=FileSystem.get(configuration);
  // Recreate the input directory holding a single small file.
  if (fs.exists(inputDir)) {
    fs.delete(inputDir,true);
  }
  fs.mkdirs(inputDir);
  DataOutputStream stream=fs.create(new Path(inputDir,"part-" + 0));
  stream.writeBytes("testStatusLimit");
  stream.close();
  // Output directory must not pre-exist.
  if (fs.exists(outputDir)) {
    fs.delete(outputDir,true);
  }
  Job job=Job.getInstance(configuration,"testStatusLimit");
  job.setMapperClass(StatusLimitMapper.class);
  job.setNumReduceTasks(0);
  FileInputFormat.addInputPath(job,inputDir);
  FileOutputFormat.setOutputPath(job,outputDir);
  job.waitForCompletion(true);
  assertTrue("Job failed",job.isSuccessful());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test {@link Reporter}'s progress for map-reduce job: a job with one map
 * and one reduce, each limited to a single attempt, must complete
 * successfully while its tasks report progress.
 */
@Test public void testReporterProgressForMRJob() throws IOException {
  JobConf jobConf=new JobConf();
  jobConf.setMapperClass(ProgressTesterMapper.class);
  jobConf.setReducerClass(ProgressTestingReducer.class);
  jobConf.setMapOutputKeyClass(Text.class);
  // A single attempt each, so a progress bug surfaces as a plain failure.
  jobConf.setMaxMapAttempts(1);
  jobConf.setMaxReduceAttempts(1);
  Path workDir=new Path(testRootTempDir,"testReporterProgressForMRJob");
  RunningJob runningJob=UtilsForTests.runJob(jobConf,new Path(workDir,"in"),new Path(workDir,"out"),1,1,INPUT);
  runningJob.waitForCompletion();
  assertTrue("Job failed",runningJob.isSuccessful());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test {@link Reporter}'s progress for a map-only job.
 * This will make sure that only the map phase decides the attempt's progress.
 */
@SuppressWarnings("deprecation") @Test public void testReporterProgressForMapOnlyJob() throws IOException {
  JobConf jobConf=new JobConf();
  jobConf.setMapperClass(ProgressTesterMapper.class);
  jobConf.setMapOutputKeyClass(Text.class);
  // One map attempt, zero reduces: map-only job.
  jobConf.setMaxMapAttempts(1);
  jobConf.setMaxReduceAttempts(0);
  Path workDir=new Path(testRootTempDir,"testReporterProgressForMapOnlyJob");
  RunningJob runningJob=UtilsForTests.runJob(jobConf,new Path(workDir,"in"),new Path(workDir,"out"),1,0,INPUT);
  runningJob.waitForCompletion();
  assertTrue("Job failed",runningJob.isSuccessful());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies ShuffleHandler state recovery across restarts: a shuffle token
 * registered before a restart must still authorize requests afterwards, and
 * stopping the application must invalidate it even across a further restart.
 */
@Test public void testRecovery() throws IOException {
final String user="someuser";
final ApplicationId appId=ApplicationId.newInstance(12345,1);
final JobID jobId=JobID.downgrade(TypeConverter.fromYarn(appId));
final File tmpDir=new File(System.getProperty("test.build.data",System.getProperty("java.io.tmpdir")),TestShuffleHandler.class.getName());
Configuration conf=new Configuration();
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY,0);
conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS,3);
ShuffleHandler shuffle=new ShuffleHandler();
// Recovery path enables the state store used across restarts below.
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
tmpDir.mkdirs();
try {
shuffle.init(conf);
shuffle.start();
// Register the app with a serialized shuffle token.
DataOutputBuffer outputBuffer=new DataOutputBuffer();
outputBuffer.reset();
Token jt=new Token("identifier".getBytes(),"password".getBytes(),new Text(user),new Text("shuffleService"));
jt.write(outputBuffer);
shuffle.initializeApplication(new ApplicationInitializationContext(user,appId,ByteBuffer.wrap(outputBuffer.getData(),0,outputBuffer.getLength())));
int rc=getShuffleResponseCode(shuffle,jt);
Assert.assertEquals(HttpURLConnection.HTTP_OK,rc);
// Restart: the recovered state must still authorize the token.
shuffle.close();
shuffle=new ShuffleHandler();
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
shuffle.init(conf);
shuffle.start();
rc=getShuffleResponseCode(shuffle,jt);
Assert.assertEquals(HttpURLConnection.HTTP_OK,rc);
// Stop the application: the token is invalidated immediately...
shuffle.stopApplication(new ApplicationTerminationContext(appId));
rc=getShuffleResponseCode(shuffle,jt);
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,rc);
// ...and remains invalid after yet another restart.
shuffle.close();
shuffle=new ShuffleHandler();
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
shuffle.init(conf);
shuffle.start();
rc=getShuffleResponseCode(shuffle,jt);
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,rc);
}
finally {
if (shuffle != null) {
shuffle.close();
}
FileUtil.fullyDelete(tmpDir);
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies ShuffleHandler state-store schema versioning: recovery succeeds
 * from a compatible (same-major) stored version but fails with a
 * ServiceStateException when the stored major version is incompatible.
 */
@Test public void testRecoveryFromOtherVersions() throws IOException {
final String user="someuser";
final ApplicationId appId=ApplicationId.newInstance(12345,1);
final File tmpDir=new File(System.getProperty("test.build.data",System.getProperty("java.io.tmpdir")),TestShuffleHandler.class.getName());
Configuration conf=new Configuration();
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY,0);
conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS,3);
ShuffleHandler shuffle=new ShuffleHandler();
// Recovery path enables the persistent state store used below.
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
tmpDir.mkdirs();
try {
shuffle.init(conf);
shuffle.start();
// Register the app with a serialized shuffle token.
DataOutputBuffer outputBuffer=new DataOutputBuffer();
outputBuffer.reset();
Token jt=new Token("identifier".getBytes(),"password".getBytes(),new Text(user),new Text("shuffleService"));
jt.write(outputBuffer);
shuffle.initializeApplication(new ApplicationInitializationContext(user,appId,ByteBuffer.wrap(outputBuffer.getData(),0,outputBuffer.getLength())));
int rc=getShuffleResponseCode(shuffle,jt);
Assert.assertEquals(HttpURLConnection.HTTP_OK,rc);
// Plain restart: recovery from the current version works.
shuffle.close();
shuffle=new ShuffleHandler();
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
shuffle.init(conf);
shuffle.start();
rc=getShuffleResponseCode(shuffle,jt);
Assert.assertEquals(HttpURLConnection.HTTP_OK,rc);
Version version=Version.newInstance(1,0);
Assert.assertEquals(version,shuffle.getCurrentVersion());
// Store a same-major, newer-minor version (1.1): still compatible.
Version version11=Version.newInstance(1,1);
shuffle.storeVersion(version11);
Assert.assertEquals(version11,shuffle.loadVersion());
shuffle.close();
shuffle=new ShuffleHandler();
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
shuffle.init(conf);
shuffle.start();
// Startup rewrote the stored version back to the current one (1.0).
Assert.assertEquals(version,shuffle.loadVersion());
rc=getShuffleResponseCode(shuffle,jt);
Assert.assertEquals(HttpURLConnection.HTTP_OK,rc);
// Store an incompatible major version (2.1): startup must now fail.
Version version21=Version.newInstance(2,1);
shuffle.storeVersion(version21);
Assert.assertEquals(version21,shuffle.loadVersion());
shuffle.close();
shuffle=new ShuffleHandler();
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
shuffle.init(conf);
try {
shuffle.start();
Assert.fail("Incompatible version, should expect fail here.");
}
catch ( ServiceStateException e) {
Assert.assertTrue("Exception message mismatch",e.getMessage().contains("Incompatible version for state DB schema:"));
}
}
finally {
if (shuffle != null) {
shuffle.close();
}
FileUtil.fullyDelete(tmpDir);
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * simulate a reducer that sends an invalid shuffle-header - sometimes a wrong
 * header_name and sometimes a wrong version
 * @throws Exception exception
 */
@Test(timeout=10000) public void testIncompatibleShuffleVersion() throws Exception {
  Configuration configuration=new Configuration();
  configuration.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY,0);
  ShuffleHandler handler=new ShuffleHandler();
  handler.init(configuration);
  handler.start();
  URL url=new URL("http://127.0.0.1:" + handler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY) + "/mapOutput?job=job_12345_1&reduce=1&map=attempt_12345_1_m_1_0");
  final int attempts=3;
  for (int attempt=0; attempt < attempts; ++attempt) {
    HttpURLConnection connection=(HttpURLConnection)url.openConnection();
    // Vary which header field carries the bad value from attempt to attempt.
    connection.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,attempt == 0 ? "mapreduce" : "other");
    connection.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,attempt == 1 ? "1.0.0" : "1.0.1");
    connection.connect();
    // Every combination is invalid, so the handler must reject the request.
    Assert.assertEquals(HttpURLConnection.HTTP_BAD_REQUEST,connection.getResponseCode());
  }
  handler.stop();
  handler.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify client prematurely closing a connection. The stubbed shuffle sends
 * a response body far larger than the client reads; the client closes its
 * side after reading a single header, and the handler must not invoke
 * sendError as a result (the failures list must stay empty).
 * @throws Exception exception.
 */
@Test(timeout=10000) public void testClientClosesConnection() throws Exception {
// Collects an Error the first time sendError fires; asserted empty at the end.
final ArrayList failures=new ArrayList(1);
Configuration conf=new Configuration();
// Port 0 binds an ephemeral port; read back from the config below.
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY,0);
ShuffleHandler shuffleHandler=new ShuffleHandler(){
// Stubbed Shuffle serving a synthetic map output instead of real files.
@Override protected Shuffle getShuffle( Configuration conf){
return new Shuffle(conf){
@Override protected MapOutputInfo getMapOutputInfo( String base, String mapId, int reduce, String user) throws IOException {
return null;
}
@Override protected void populateHeaders( List mapIds, String jobId, String user, int reduce, HttpRequest request, HttpResponse response, boolean keepAliveParam, Map infoMap) throws IOException {
// Advertise a small content length (100) regardless of the much
// larger body written by sendMapOutput.
super.setResponseHeaders(response,keepAliveParam,100);
}
// Skip request verification entirely for this test.
@Override protected void verifyRequest( String appid, ChannelHandlerContext ctx, HttpRequest request, HttpResponse response, URL requestUri) throws IOException {
}
@Override protected ChannelFuture sendMapOutput( ChannelHandlerContext ctx, Channel ch, String user, String mapId, int reduce, MapOutputInfo info) throws IOException {
// Write one shuffle header, then a large synthetic payload (100000
// repeated headers) that the client will never fully read.
ShuffleHeader header=new ShuffleHeader("attempt_12345_1_m_1_0",5678,5678,1);
DataOutputBuffer dob=new DataOutputBuffer();
header.write(dob);
ch.write(wrappedBuffer(dob.getData(),0,dob.getLength()));
dob=new DataOutputBuffer();
for (int i=0; i < 100000; ++i) {
header.write(dob);
}
return ch.write(wrappedBuffer(dob.getData(),0,dob.getLength()));
}
// Record (at most once) any server-side error; the test expects none.
@Override protected void sendError( ChannelHandlerContext ctx, HttpResponseStatus status){
if (failures.size() == 0) {
failures.add(new Error());
ctx.getChannel().close();
}
}
@Override protected void sendError( ChannelHandlerContext ctx, String message, HttpResponseStatus status){
if (failures.size() == 0) {
failures.add(new Error());
ctx.getChannel().close();
}
}
}
;
}
}
;
shuffleHandler.init(conf);
shuffleHandler.start();
URL url=new URL("http://127.0.0.1:" + shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY) + "/mapOutput?job=job_12345_1&reduce=1&map=attempt_12345_1_m_1_0");
HttpURLConnection conn=(HttpURLConnection)url.openConnection();
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
conn.connect();
DataInputStream input=new DataInputStream(conn.getInputStream());
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
Assert.assertEquals("close",conn.getHeaderField(HttpHeaders.CONNECTION));
// Read exactly one header, then close early - simulating the premature close.
ShuffleHeader header=new ShuffleHeader();
header.readFields(input);
input.close();
shuffleHandler.stop();
Assert.assertTrue("sendError called when client closed connection",failures.size() == 0);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies keep-alive connection handling: both the config-driven keep-alive
 * and an explicit "keepAlive=true" query parameter must produce a Keep-Alive
 * Connection header with the expected "timeout=1" value, and sendError must
 * never be triggered.
 */
@Test(timeout=10000) public void testKeepAlive() throws Exception {
// Collects an Error the first time sendError fires on the server side.
final ArrayList failures=new ArrayList(1);
Configuration conf=new Configuration();
// Ephemeral port; keep-alive enabled. The timeout is set to a negative
// value (-100) to exercise the keep-alive timeout handling quickly.
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY,0);
conf.setBoolean(ShuffleHandler.SHUFFLE_CONNECTION_KEEP_ALIVE_ENABLED,true);
conf.setInt(ShuffleHandler.SHUFFLE_CONNECTION_KEEP_ALIVE_TIME_OUT,-100);
ShuffleHandler shuffleHandler=new ShuffleHandler(){
// Stubbed Shuffle serving a synthetic map output instead of real files.
@Override protected Shuffle getShuffle( final Configuration conf){
return new Shuffle(conf){
@Override protected MapOutputInfo getMapOutputInfo( String base, String mapId, int reduce, String user) throws IOException {
return null;
}
// Skip request verification entirely for this test.
@Override protected void verifyRequest( String appid, ChannelHandlerContext ctx, HttpRequest request, HttpResponse response, URL requestUri) throws IOException {
}
@Override protected void populateHeaders( List mapIds, String jobId, String user, int reduce, HttpRequest request, HttpResponse response, boolean keepAliveParam, Map infoMap) throws IOException {
// Recompute the exact content length of the synthetic payload
// written by sendMapOutput so the response headers match the body.
ShuffleHeader header=new ShuffleHeader("attempt_12345_1_m_1_0",5678,5678,1);
DataOutputBuffer dob=new DataOutputBuffer();
header.write(dob);
dob=new DataOutputBuffer();
for (int i=0; i < 100000; ++i) {
header.write(dob);
}
long contentLength=dob.getLength();
// When the client requested keep-alive via the URL parameter,
// disable the config-driven keep-alive so only the parameter path
// is exercised on that request.
if (keepAliveParam) {
connectionKeepAliveEnabled=false;
}
super.setResponseHeaders(response,keepAliveParam,contentLength);
}
@Override protected ChannelFuture sendMapOutput( ChannelHandlerContext ctx, Channel ch, String user, String mapId, int reduce, MapOutputInfo info) throws IOException {
// Write one shuffle header followed by a large synthetic body
// (100000 repeated headers).
HttpResponse response=new DefaultHttpResponse(HTTP_1_1,OK);
ShuffleHeader header=new ShuffleHeader("attempt_12345_1_m_1_0",5678,5678,1);
DataOutputBuffer dob=new DataOutputBuffer();
header.write(dob);
ch.write(wrappedBuffer(dob.getData(),0,dob.getLength()));
dob=new DataOutputBuffer();
for (int i=0; i < 100000; ++i) {
header.write(dob);
}
return ch.write(wrappedBuffer(dob.getData(),0,dob.getLength()));
}
// Record (at most once) any server-side error; the test expects none.
@Override protected void sendError( ChannelHandlerContext ctx, HttpResponseStatus status){
if (failures.size() == 0) {
failures.add(new Error());
ctx.getChannel().close();
}
}
@Override protected void sendError( ChannelHandlerContext ctx, String message, HttpResponseStatus status){
if (failures.size() == 0) {
failures.add(new Error());
ctx.getChannel().close();
}
}
}
;
}
}
;
shuffleHandler.init(conf);
shuffleHandler.start();
String shuffleBaseURL="http://127.0.0.1:" + shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY);
// First request: no keepAlive parameter - keep-alive comes from the config.
URL url=new URL(shuffleBaseURL + "/mapOutput?job=job_12345_1&reduce=1&" + "map=attempt_12345_1_m_1_0");
HttpURLConnection conn=(HttpURLConnection)url.openConnection();
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
conn.connect();
DataInputStream input=new DataInputStream(conn.getInputStream());
Assert.assertEquals(HttpHeaders.KEEP_ALIVE,conn.getHeaderField(HttpHeaders.CONNECTION));
Assert.assertEquals("timeout=1",conn.getHeaderField(HttpHeaders.KEEP_ALIVE));
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
ShuffleHeader header=new ShuffleHeader();
header.readFields(input);
input.close();
// Second request: explicit keepAlive=true query parameter.
url=new URL(shuffleBaseURL + "/mapOutput?job=job_12345_1&reduce=1&" + "map=attempt_12345_1_m_1_0&keepAlive=true");
conn=(HttpURLConnection)url.openConnection();
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
conn.connect();
input=new DataInputStream(conn.getInputStream());
Assert.assertEquals(HttpHeaders.KEEP_ALIVE,conn.getHeaderField(HttpHeaders.CONNECTION));
Assert.assertEquals("timeout=1",conn.getHeaderField(HttpHeaders.KEEP_ALIVE));
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
header=new ShuffleHeader();
header.readFields(input);
input.close();
}
APIUtilityVerifier BooleanVerifier AssumptionSetter HybridVerifier
/**
 * Validate the ownership of the map-output files being pulled in. The
 * local-file-system owner of the file should match the user component in the
 * request; here the requesting user ("randomUser") does not own the files,
 * so the response body must carry the owner-mismatch error message.
 * Requires NativeIO (skipped otherwise) since ownership is read via fstat.
 * @throws IOException exception
 */
@Test(timeout=100000) public void testMapFileAccess() throws IOException {
// Ownership checks need native fstat support; skip where unavailable.
assumeTrue(NativeIO.isAvailable());
Configuration conf=new Configuration();
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY,0);
conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS,3);
// Kerberos auth makes the handler enforce secure file-ownership checks.
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(conf);
File absLogDir=new File("target",TestShuffleHandler.class.getSimpleName() + "LocDir").getAbsoluteFile();
conf.set(YarnConfiguration.NM_LOCAL_DIRS,absLogDir.getAbsolutePath());
ApplicationId appId=ApplicationId.newInstance(12345,1);
LOG.info(appId.toString());
String appAttemptId="attempt_12345_1_m_1_0";
// Deliberately NOT the local-file-system owner of the created files.
String user="randomUser";
String reducerId="0";
List fileMap=new ArrayList();
createShuffleHandlerFiles(absLogDir,user,appId.toString(),appAttemptId,conf,fileMap);
ShuffleHandler shuffleHandler=new ShuffleHandler(){
@Override protected Shuffle getShuffle( Configuration conf){
return new Shuffle(conf){
// Skip token verification so the request reaches the ownership check.
@Override protected void verifyRequest( String appid, ChannelHandlerContext ctx, HttpRequest request, HttpResponse response, URL requestUri) throws IOException {
}
}
;
}
}
;
shuffleHandler.init(conf);
try {
shuffleHandler.start();
// Register the application with a serialized job token for "randomUser".
DataOutputBuffer outputBuffer=new DataOutputBuffer();
outputBuffer.reset();
Token jt=new Token("identifier".getBytes(),"password".getBytes(),new Text(user),new Text("shuffleService"));
jt.write(outputBuffer);
shuffleHandler.initializeApplication(new ApplicationInitializationContext(user,appId,ByteBuffer.wrap(outputBuffer.getData(),0,outputBuffer.getLength())));
URL url=new URL("http://127.0.0.1:" + shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY) + "/mapOutput?job=job_12345_0001&reduce="+ reducerId+ "&map=attempt_12345_1_m_1_0");
HttpURLConnection conn=(HttpURLConnection)url.openConnection();
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
conn.connect();
byte[] byteArr=new byte[10000];
try {
DataInputStream is=new DataInputStream(conn.getInputStream());
is.readFully(byteArr);
}
catch ( EOFException e) {
// Expected: the error response body is shorter than 10000 bytes.
}
// Determine the real owner of the map-output file via native fstat.
FileInputStream is=new FileInputStream(fileMap.get(0));
String owner=NativeIO.POSIX.getFstat(is.getFD()).getOwner();
is.close();
// The handler must refuse to serve the file and report the mismatch.
String message="Owner '" + owner + "' for path "+ fileMap.get(0).getAbsolutePath()+ " did not match expected owner '"+ user+ "'";
Assert.assertTrue((new String(byteArr)).contains(message));
}
finally {
shuffleHandler.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test without TASK_LOG_DIR: with YARN_APP_CONTAINER_LOG_DIR unset,
 * {@code TaskLog.getMRv2LogDir()} must return null and
 * {@code TaskLog.getTaskLogFile()} must still resolve to a ".../stdout" path.
 * @throws IOException
 */
@Test(timeout=50000) public void testTaskLogWithoutTaskLogDir() throws IOException {
// Ensure the MRv2 container log dir property is absent for this test.
System.clearProperty(YarnConfiguration.YARN_APP_CONTAINER_LOG_DIR);
// Fixed JUnit argument order: the expected value (null) comes first.
assertEquals(null,TaskLog.getMRv2LogDir());
TaskAttemptID taid=mock(TaskAttemptID.class);
JobID jid=new JobID("job",1);
when(taid.getJobID()).thenReturn(jid);
when(taid.toString()).thenReturn("JobId");
File f=TaskLog.getTaskLogFile(taid,true,LogName.STDOUT);
assertTrue(f.getAbsolutePath().endsWith("stdout"));
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test TaskAttemptID-based log paths. With YARN_APP_CONTAINER_LOG_DIR set,
 * exercises the TaskLog helpers end-to-end: task log file location, index
 * file path, syncLogs, real log file location, log-dir owner lookup, and
 * reading back a task log.
 * @throws IOException
 */
@Test(timeout=50000) public void testTaskLog() throws IOException {
System.setProperty(YarnConfiguration.YARN_APP_CONTAINER_LOG_DIR,"testString");
// Fixed JUnit argument order: the expected value comes first.
assertEquals("testString",TaskLog.getMRv2LogDir());
TaskAttemptID taid=mock(TaskAttemptID.class);
JobID jid=new JobID("job",1);
when(taid.getJobID()).thenReturn(jid);
when(taid.toString()).thenReturn("JobId");
File f=TaskLog.getTaskLogFile(taid,true,LogName.STDOUT);
assertTrue(f.getAbsolutePath().endsWith("testString" + File.separatorChar + "stdout"));
// Recreate an empty index file so syncLogs starts from a known state.
File indexFile=TaskLog.getIndexFile(taid,true);
if (!indexFile.getParentFile().exists()) {
indexFile.getParentFile().mkdirs();
}
indexFile.delete();
indexFile.createNewFile();
TaskLog.syncLogs("location",taid,true);
// "true" above marks a cleanup attempt, hence the ".cleanup" suffix.
assertTrue(indexFile.getAbsolutePath().endsWith("userlogs" + File.separatorChar + "job_job_0001"+ File.separatorChar+ "JobId.cleanup"+ File.separatorChar+ "log.index"));
f=TaskLog.getRealTaskLogFileLocation(taid,true,LogName.DEBUGOUT);
// The real location may be null when the index has no DEBUGOUT entry.
if (f != null) {
assertTrue(f.getAbsolutePath().endsWith("location" + File.separatorChar + "debugout"));
// Give DEBUGOUT some content so the read-back below is non-empty.
FileUtils.copyFile(indexFile,f);
}
assertTrue(TaskLog.obtainLogDirOwner(taid).length() > 0);
assertTrue(readTaskLog(TaskLog.LogName.DEBUGOUT,taid,true).length() > 0);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test the {@link TaskStatus} against large sized task-diagnostic-info and
 * state-string. Does the following
 * - create Map/Reduce TaskStatus such that the task-diagnostic-info and
 * state-string are small strings and check their contents
 * - append them with small string and check their contents
 * - append them with large string and check their size
 * - update the status using statusUpdate() calls and check the size/contents
 * - create Map/Reduce TaskStatus with large string and check their size
 */
@Test public void testTaskDiagnosticsAndStateString(){
String test="hi";
final int maxSize=16;
// Anonymous subclass pins getMaxStringSize() to a small, testable cap.
TaskStatus status=new TaskStatus(null,0,0,null,test,test,null,null,null){
@Override protected int getMaxStringSize(){
return maxSize;
}
@Override public void addFetchFailedMap( TaskAttemptID mapTaskId){
}
@Override public boolean getIsMap(){
return false;
}
}
;
assertEquals("Small diagnostic info test failed",status.getDiagnosticInfo(),test);
assertEquals("Small state string test failed",status.getStateString(),test);
// Appending a small diagnostic string: "hi" + "hi" -> "hihi".
String newDInfo=test.concat(test);
status.setDiagnosticInfo(test);
status.setStateString(newDInfo);
assertEquals("Small diagnostic info append failed",newDInfo,status.getDiagnosticInfo());
assertEquals("Small state-string append failed",newDInfo,status.getStateString());
// statusUpdate(TaskStatus) appends diagnostics but replaces state-string.
TaskStatus newStatus=(TaskStatus)status.clone();
String newSInfo="hi1";
newStatus.setStateString(newSInfo);
status.statusUpdate(newStatus);
newDInfo=newDInfo.concat(newStatus.getDiagnosticInfo());
assertEquals("Status-update on diagnostic-info failed",newDInfo,status.getDiagnosticInfo());
assertEquals("Status-update on state-string failed",newSInfo,status.getStateString());
// The two other statusUpdate overloads must also replace the state-string.
newSInfo="hi2";
status.statusUpdate(0,newSInfo,null);
assertEquals("Status-update on state-string failed",newSInfo,status.getStateString());
newSInfo="hi3";
status.statusUpdate(null,0,newSInfo,null,0);
assertEquals("Status-update on state-string failed",newSInfo,status.getStateString());
// A 20-char string exceeds maxSize(16); stored values must be clipped.
String large="hihihihihihihihihihi";
status.setDiagnosticInfo(large);
status.setStateString(large);
assertEquals("Large diagnostic info append test failed",maxSize,status.getDiagnosticInfo().length());
assertEquals("Large state-string append test failed",maxSize,status.getStateString().length());
newStatus.setDiagnosticInfo(large + "0");
newStatus.setStateString(large + "1");
status.statusUpdate(newStatus);
assertEquals("Status-update on diagnostic info failed",maxSize,status.getDiagnosticInfo().length());
assertEquals("Status-update on state-string failed",maxSize,status.getStateString().length());
status.statusUpdate(0,large + "2",null);
assertEquals("Status-update on state-string failed",maxSize,status.getStateString().length());
status.statusUpdate(null,0,large + "3",null,0);
assertEquals("Status-update on state-string failed",maxSize,status.getStateString().length());
// Constructing directly with oversized strings must also clip to maxSize.
status=new TaskStatus(null,0,0,null,large,large,null,null,null){
@Override protected int getMaxStringSize(){
return maxSize;
}
@Override public void addFetchFailedMap( TaskAttemptID mapTaskId){
}
@Override public boolean getIsMap(){
return false;
}
}
;
assertEquals("Large diagnostic info test failed",maxSize,status.getDiagnosticInfo().length());
assertEquals("Large state-string test failed",maxSize,status.getStateString().length());
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips files of random lengths through TextInputFormat: writes one
 * integer per line, splits the input at random split counts, and verifies
 * every key appears in exactly one partition (no loss, no duplication).
 */
@Test(timeout=500000) public void testFormat() throws Exception {
JobConf job=new JobConf(defaultConf);
Path file=new Path(workDir,"test.txt");
Reporter reporter=Reporter.NULL;
// Log the seed so a failing run can be reproduced deterministically.
int seed=new Random().nextInt();
LOG.info("seed = " + seed);
Random random=new Random(seed);
localFs.delete(workDir,true);
FileInputFormat.setInputPaths(job,workDir);
// Grow the file by random increments up to MAX_LENGTH entries.
for (int length=0; length < MAX_LENGTH; length+=random.nextInt(MAX_LENGTH / 10) + 1) {
LOG.debug("creating; entries = " + length);
// Write the numbers 0..length-1, one per line.
Writer writer=new OutputStreamWriter(localFs.create(file));
try {
for (int i=0; i < length; i++) {
writer.write(Integer.toString(i));
writer.write("\n");
}
}
finally {
writer.close();
}
TextInputFormat format=new TextInputFormat();
format.configure(job);
LongWritable key=new LongWritable();
Text value=new Text();
// Try three different random split counts for each file length.
for (int i=0; i < 3; i++) {
int numSplits=random.nextInt(MAX_LENGTH / 20) + 1;
LOG.debug("splitting: requesting = " + numSplits);
InputSplit[] splits=format.getSplits(job,numSplits);
LOG.debug("splitting: got = " + splits.length);
// An empty file must still yield exactly one zero-length split.
if (length == 0) {
assertEquals("Files of length 0 are not returned from FileInputFormat.getSplits().",1,splits.length);
assertEquals("Empty file length == 0",0,splits[0].getLength());
}
// bits tracks which values have been read; each must appear once.
BitSet bits=new BitSet(length);
for (int j=0; j < splits.length; j++) {
LOG.debug("split[" + j + "]= "+ splits[j]);
RecordReader reader=format.getRecordReader(splits[j],job,reporter);
try {
int count=0;
while (reader.next(key,value)) {
int v=Integer.parseInt(value.toString());
LOG.debug("read " + v);
if (bits.get(v)) {
LOG.warn("conflict with " + v + " in split "+ j+ " at position "+ reader.getPos());
}
assertFalse("Key in multiple partitions.",bits.get(v));
bits.set(v);
count++;
}
LOG.debug("splits[" + j + "]="+ splits[j]+ " count="+ count);
}
finally {
reader.close();
}
}
// Every written value must have been read by exactly one split.
assertEquals("Some keys in no partition.",length,bits.cardinality());
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Test readLine for correct interpretation of maxLineLength
 * (returned string should be clipped at maxLineLength, and the
 * remaining bytes on the same line should be thrown out).
 * Also check that the summed return values match the input length in bytes.
 * Varies buffer size to stress test.
 * @throws Exception
 */
@Test(timeout=5000) public void testMaxLineLength() throws Exception {
// Lines of length 1, 2, 0, 3, 4 and 5 with mixed \n, \r and \r\n endings.
final String STR="a\nbb\n\nccc\rdddd\r\neeeee";
final int STRLENBYTES=STR.getBytes().length;
Text out=new Text();
// Exercise every internal buffer size from 1 byte up to the full input.
for (int bufsz=1; bufsz < STRLENBYTES + 1; ++bufsz) {
LineReader in=makeStream(STR,bufsz);
int c=0;  // accumulates the byte counts reported by readLine
c+=in.readLine(out,1);
assertEquals("line1 length, bufsz: " + bufsz,1,out.getLength());
c+=in.readLine(out,1);
// "bb" is clipped to maxLineLength=1; the remainder is discarded.
assertEquals("line2 length, bufsz: " + bufsz,1,out.getLength());
c+=in.readLine(out,1);
assertEquals("line3 length, bufsz: " + bufsz,0,out.getLength());
c+=in.readLine(out,3);
assertEquals("line4 length, bufsz: " + bufsz,3,out.getLength());
c+=in.readLine(out,10);
assertEquals("line5 length, bufsz: " + bufsz,4,out.getLength());
c+=in.readLine(out,8);
// Fixed: this asserts the sixth line ("eeeee"); message previously
// duplicated "line5", which would mislabel a failure here.
assertEquals("line6 length, bufsz: " + bufsz,5,out.getLength());
assertEquals("end of file, bufsz: " + bufsz,0,in.readLine(out));
// Fixed JUnit argument order: the expected value (STRLENBYTES) first.
assertEquals("total bytes, bufsz: " + bufsz,STRLENBYTES,c);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test using the gzip codec and an empty input file: a zero-length
 * compressed file must still produce exactly one split that yields no
 * records.
 */
@Test(timeout=5000) public void testGzipEmpty() throws IOException {
JobConf jobConf=new JobConf(defaultConf);
CompressionCodec gzipCodec=new GzipCodec();
ReflectionUtils.setConf(gzipCodec,jobConf);
// Start from a clean work directory holding only the empty gzip file.
localFs.delete(workDir,true);
writeFile(localFs,new Path(workDir,"empty.gz"),gzipCodec,"");
FileInputFormat.setInputPaths(jobConf,workDir);
TextInputFormat inputFormat=new TextInputFormat();
inputFormat.configure(jobConf);
InputSplit[] inputSplits=inputFormat.getSplits(jobConf,100);
assertEquals("Compressed files of length 0 are not returned from FileInputFormat.getSplits().",1,inputSplits.length);
// Reading the single split of an empty file must produce zero records.
List records=readSplit(inputFormat,inputSplits[0],jobConf);
assertEquals("Compressed empty file length == 0",0,records.size());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies TextInputFormat with a splittable compression codec (BZip2):
 * for files of random lengths and random split counts, every record must
 * land in exactly one split - no loss and no duplication across splits.
 */
@Test(timeout=900000) public void testSplitableCodecs() throws IOException {
JobConf conf=new JobConf(defaultConf);
int seed=new Random().nextInt();
// Load BZip2Codec reflectively; translate a missing class to IOException.
CompressionCodec codec=null;
try {
codec=(CompressionCodec)ReflectionUtils.newInstance(conf.getClassByName("org.apache.hadoop.io.compress.BZip2Codec"),conf);
}
catch ( ClassNotFoundException cnfe) {
throw new IOException("Illegal codec!");
}
Path file=new Path(workDir,"test" + codec.getDefaultExtension());
Reporter reporter=Reporter.NULL;
// Log the seed so a failing run can be reproduced deterministically.
LOG.info("seed = " + seed);
Random random=new Random(seed);
FileSystem localFs=FileSystem.getLocal(conf);
localFs.delete(workDir,true);
FileInputFormat.setInputPaths(conf,workDir);
final int MAX_LENGTH=500000;
// Grow the file from MAX_LENGTH/2 upward by random increments.
for (int length=MAX_LENGTH / 2; length < MAX_LENGTH; length+=random.nextInt(MAX_LENGTH / 4) + 1) {
LOG.info("creating; entries = " + length);
// Write the numbers 0..length-1, one per line, through the codec.
Writer writer=new OutputStreamWriter(codec.createOutputStream(localFs.create(file)));
try {
for (int i=0; i < length; i++) {
writer.write(Integer.toString(i));
writer.write("\n");
}
}
finally {
writer.close();
}
TextInputFormat format=new TextInputFormat();
format.configure(conf);
LongWritable key=new LongWritable();
Text value=new Text();
// Try three different random split counts for each file length.
for (int i=0; i < 3; i++) {
int numSplits=random.nextInt(MAX_LENGTH / 2000) + 1;
LOG.info("splitting: requesting = " + numSplits);
InputSplit[] splits=format.getSplits(conf,numSplits);
LOG.info("splitting: got = " + splits.length);
// bits tracks which values have been read; each must appear once.
BitSet bits=new BitSet(length);
for (int j=0; j < splits.length; j++) {
LOG.debug("split[" + j + "]= "+ splits[j]);
RecordReader reader=format.getRecordReader(splits[j],conf,reporter);
try {
int counter=0;
while (reader.next(key,value)) {
int v=Integer.parseInt(value.toString());
LOG.debug("read " + v);
if (bits.get(v)) {
LOG.warn("conflict with " + v + " in split "+ j+ " at position "+ reader.getPos());
}
assertFalse("Key in multiple partitions.",bits.get(v));
bits.set(v);
counter++;
}
// Non-empty splits are logged at INFO, empty ones at DEBUG.
if (counter > 0) {
LOG.info("splits[" + j + "]="+ splits[j]+ " count="+ counter);
}
else {
LOG.debug("splits[" + j + "]="+ splits[j]+ " count="+ counter);
}
}
finally {
reader.close();
}
}
// Every written value must have been read by exactly one split.
assertEquals("Some keys in no partition.",length,bits.cardinality());
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Test readLine for various kinds of line termination sequneces.
 * Varies buffer size to stress test. Also check that the summed return
 * values match the string length in bytes.
 * @throws Exception
 */
@Test(timeout=5000) public void testNewLines() throws Exception {
// Lines: "a", "bb", "", "ccc", "dddd", then three empties from the
// \r\r\r\n\r\n run, and finally "eeeee".
final String STR="a\nbb\n\nccc\rdddd\r\r\r\n\r\neeeee";
final int STRLENBYTES=STR.getBytes().length;
Text out=new Text();
// Exercise every internal buffer size from 1 byte up to the full input.
for (int bufsz=1; bufsz < STRLENBYTES + 1; ++bufsz) {
LineReader in=makeStream(STR,bufsz);
int c=0;  // accumulates the byte counts reported by readLine
c+=in.readLine(out);
assertEquals("line1 length, bufsz:" + bufsz,1,out.getLength());
c+=in.readLine(out);
assertEquals("line2 length, bufsz:" + bufsz,2,out.getLength());
c+=in.readLine(out);
assertEquals("line3 length, bufsz:" + bufsz,0,out.getLength());
c+=in.readLine(out);
assertEquals("line4 length, bufsz:" + bufsz,3,out.getLength());
c+=in.readLine(out);
assertEquals("line5 length, bufsz:" + bufsz,4,out.getLength());
c+=in.readLine(out);
assertEquals("line6 length, bufsz:" + bufsz,0,out.getLength());
c+=in.readLine(out);
assertEquals("line7 length, bufsz:" + bufsz,0,out.getLength());
c+=in.readLine(out);
assertEquals("line8 length, bufsz:" + bufsz,0,out.getLength());
c+=in.readLine(out);
assertEquals("line9 length, bufsz:" + bufsz,5,out.getLength());
assertEquals("end of file, bufsz: " + bufsz,0,in.readLine(out));
// Fixed JUnit argument order: the expected value (STRLENBYTES) first.
assertEquals("total bytes, bufsz: " + bufsz,STRLENBYTES,c);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test using the gzip codec for reading: each compressed file must become
 * exactly one (unsplittable) split containing its expected records.
 */
@Test(timeout=5000) public void testGzip() throws IOException {
JobConf jobConf=new JobConf(defaultConf);
CompressionCodec gzipCodec=new GzipCodec();
ReflectionUtils.setConf(gzipCodec,jobConf);
// Start from a clean work dir containing the two gzip'd input files.
localFs.delete(workDir,true);
writeFile(localFs,new Path(workDir,"part1.txt.gz"),gzipCodec,"the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n");
writeFile(localFs,new Path(workDir,"part2.txt.gz"),gzipCodec,"this is a test\nof gzip\n");
FileInputFormat.setInputPaths(jobConf,workDir);
TextInputFormat inputFormat=new TextInputFormat();
inputFormat.configure(jobConf);
InputSplit[] splits=inputFormat.getSplits(jobConf,100);
assertEquals("compressed splits == 2",2,splits.length);
// Split ordering is not guaranteed; normalize so splits[0] is part1.
FileSplit firstSplit=(FileSplit)splits[0];
if (firstSplit.getPath().getName().equals("part2.txt.gz")) {
splits[0]=splits[1];
splits[1]=firstSplit;
}
List records=readSplit(inputFormat,splits[0],jobConf);
assertEquals("splits[0] length",6,records.size());
assertEquals("splits[0][5]"," dog",records.get(5).toString());
records=readSplit(inputFormat,splits[1],jobConf);
assertEquals("splits[1] length",2,records.size());
assertEquals("splits[1][0]","this is a test",records.get(0).toString());
assertEquals("splits[1][1]","of gzip",records.get(1).toString());
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
@Test public void testFormat() throws Exception {
JobConf job=new JobConf();
job.set(JobContext.TASK_ATTEMPT_ID,attempt);
FileOutputFormat.setOutputPath(job,workDir.getParent().getParent());
FileOutputFormat.setWorkOutputPath(job,workDir);
FileSystem fs=workDir.getFileSystem(job);
if (!fs.mkdirs(workDir)) {
fail("Failed to create output directory");
}
String file="test_format.txt";
Reporter reporter=Reporter.NULL;
TextOutputFormat
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Verifies TextOutputFormat with a custom key/value separator (\u0001),
 * covering every null/NullWritable combination of key and value: nulls and
 * NullWritables suppress that side (and the separator when only one side
 * is written), and a fully-null pair writes nothing.
 */
@Test public void testFormatWithCustomSeparator() throws Exception {
JobConf job=new JobConf();
String separator="\u0001";
job.set("mapreduce.output.textoutputformat.separator",separator);
job.set(JobContext.TASK_ATTEMPT_ID,attempt);
FileOutputFormat.setOutputPath(job,workDir.getParent().getParent());
FileOutputFormat.setWorkOutputPath(job,workDir);
FileSystem fs=workDir.getFileSystem(job);
if (!fs.mkdirs(workDir)) {
fail("Failed to create output directory");
}
String file="test_custom.txt";
Reporter reporter=Reporter.NULL;
TextOutputFormat theOutputFormat=new TextOutputFormat();
RecordWriter theRecordWriter=theOutputFormat.getRecordWriter(localFs,job,file,reporter);
Text key1=new Text("key1");
Text key2=new Text("key2");
Text val1=new Text("val1");
Text val2=new Text("val2");
NullWritable nullWritable=NullWritable.get();
try {
// Exercise each combination of real/null/NullWritable key and value.
theRecordWriter.write(key1,val1);
theRecordWriter.write(null,nullWritable);
theRecordWriter.write(null,val1);
theRecordWriter.write(nullWritable,val2);
theRecordWriter.write(key2,nullWritable);
theRecordWriter.write(key1,null);
theRecordWriter.write(null,null);
theRecordWriter.write(key2,val2);
}
finally {
theRecordWriter.close(reporter);
}
File expectedFile=new File(new Path(workDir,file).toString());
// StringBuilder instead of StringBuffer: no cross-thread sharing here,
// so the unsynchronized builder is the idiomatic choice.
StringBuilder expectedOutput=new StringBuilder();
expectedOutput.append(key1).append(separator).append(val1).append("\n");
expectedOutput.append(val1).append("\n");
expectedOutput.append(val2).append("\n");
expectedOutput.append(key2).append("\n");
expectedOutput.append(key1).append("\n");
expectedOutput.append(key2).append(separator).append(val2).append("\n");
String output=UtilsForTests.slurp(expectedFile);
assertEquals(expectedOutput.toString(),output);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that YARNRunner requests a history-server delegation token with
 * the RM principal as the renewer: a mocked MRClientProtocol asserts the
 * renewer inside its Answer and returns an empty token.
 */
@Test(timeout=20000) public void testHistoryServerToken() throws Exception {
conf.set(YarnConfiguration.RM_PRINCIPAL,"foo@LOCAL");
final String masterPrincipal=Master.getMasterPrincipal(conf);
final MRClientProtocol hsProxy=mock(MRClientProtocol.class);
when(hsProxy.getDelegationToken(any(GetDelegationTokenRequest.class))).thenAnswer(new Answer(){
public GetDelegationTokenResponse answer( InvocationOnMock invocation){
GetDelegationTokenRequest request=(GetDelegationTokenRequest)invocation.getArguments()[0];
// The renewer on the request must be the master (RM) principal.
assertEquals(masterPrincipal,request.getRenewer());
// Build a minimal empty token to satisfy the caller.
org.apache.hadoop.yarn.api.records.Token token=recordFactory.newRecordInstance(org.apache.hadoop.yarn.api.records.Token.class);
token.setKind("");
token.setService("");
token.setIdentifier(ByteBuffer.allocate(0));
token.setPassword(ByteBuffer.allocate(0));
GetDelegationTokenResponse tokenResponse=recordFactory.newRecordInstance(GetDelegationTokenResponse.class);
tokenResponse.setDelegationToken(token);
return tokenResponse;
}
}
);
// Run as a different user to exercise the doAs token-fetch path.
UserGroupInformation.createRemoteUser("someone").doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
yarnRunner=new YARNRunner(conf,null,null);
yarnRunner.getDelegationTokenFromHS(hsProxy);
verify(hsProxy).getDelegationToken(any(GetDelegationTokenRequest.class));
return null;
}
}
);
}
APIUtilityVerifier BooleanVerifier
/**
 * Verifies that YARNRunner logs warnings when -Djava.library.path appears in
 * both the AM admin command opts and the AM command opts. Log output is
 * captured via a temporary WriterAppender on the YARNRunner logger.
 */
@Test(timeout=20000) public void testWarnCommandOpts() throws Exception {
Logger logger=Logger.getLogger(YARNRunner.class);
ByteArrayOutputStream bout=new ByteArrayOutputStream();
Layout layout=new SimpleLayout();
Appender appender=new WriterAppender(layout,bout);
logger.addAppender(appender);
try {
JobConf jobConf=new JobConf();
jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS,"-Djava.net.preferIPv4Stack=true -Djava.library.path=foo");
jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS,"-Xmx1024m -Djava.library.path=bar");
YARNRunner yarnRunner=new YARNRunner(jobConf);
@SuppressWarnings("unused") ApplicationSubmissionContext submissionContext=buildSubmitContext(yarnRunner,jobConf);
}
finally {
// Detach the capture appender so it does not leak into other tests
// that log through the same class logger.
logger.removeAppender(appender);
}
String logMsg=bout.toString();
assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " + "yarn.app.mapreduce.am.admin-command-opts can cause programs to no " + "longer function if hadoop native libraries are used. These values "+ "should be set as part of the LD_LIBRARY_PATH in the app master JVM "+ "env using yarn.app.mapreduce.am.admin.user.env config settings."));
assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " + "yarn.app.mapreduce.am.command-opts can cause programs to no longer " + "function if hadoop native libraries are used. These values should "+ "be set as part of the LD_LIBRARY_PATH in the app master JVM env "+ "using yarn.app.mapreduce.am.env config settings."));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the AM container environment assembled by YARNRunner: the
 * LD_LIBRARY_PATH must combine PWD, the admin lib path and the user lib
 * path (in that order), and SHELL must reflect the configured admin shell.
 */
@Test public void testAMStandardEnv() throws Exception {
final String ADMIN_LIB_PATH="foo";
final String USER_LIB_PATH="bar";
final String USER_SHELL="shell";
JobConf jobConf=new JobConf();
// Admin env and user env each contribute an LD_LIBRARY_PATH component.
jobConf.set(MRJobConfig.MR_AM_ADMIN_USER_ENV,"LD_LIBRARY_PATH=" + ADMIN_LIB_PATH);
jobConf.set(MRJobConfig.MR_AM_ENV,"LD_LIBRARY_PATH=" + USER_LIB_PATH);
jobConf.set(MRJobConfig.MAPRED_ADMIN_USER_SHELL,USER_SHELL);
YARNRunner yarnRunner=new YARNRunner(jobConf);
ApplicationSubmissionContext appSubCtx=buildSubmitContext(yarnRunner,jobConf);
ContainerLaunchContext clc=appSubCtx.getAMContainerSpec();
Map env=clc.getEnvironment();
String libPath=env.get(Environment.LD_LIBRARY_PATH.name());
assertNotNull("LD_LIBRARY_PATH not set",libPath);
// Path separator depends on the cross-platform submission setting.
String cps=jobConf.getBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,MRConfig.DEFAULT_MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM) ? ApplicationConstants.CLASS_PATH_SEPARATOR : File.pathSeparator;
assertEquals("Bad AM LD_LIBRARY_PATH setting",MRApps.crossPlatformifyMREnv(conf,Environment.PWD) + cps + ADMIN_LIB_PATH+ cps+ USER_LIB_PATH,libPath);
String shell=env.get(Environment.SHELL.name());
assertNotNull("SHELL not set",shell);
assertEquals("Bad SHELL setting",USER_SHELL,shell);
}
APIUtilityVerifier BranchVerifier BooleanVerifier
/**
 * Verifies that the AM admin command opts appear in the launch commands and
 * precede the user command opts (either earlier in the same command string,
 * or in an earlier command).
 */
@Test(timeout=20000) public void testAMAdminCommandOpts() throws Exception {
JobConf jobConf=new JobConf();
jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS,"-Djava.net.preferIPv4Stack=true");
jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS,"-Xmx1024m");
YARNRunner yarnRunner=new YARNRunner(jobConf);
ApplicationSubmissionContext submissionContext=buildSubmitContext(yarnRunner,jobConf);
ContainerLaunchContext containerSpec=submissionContext.getAMContainerSpec();
List commands=containerSpec.getCommands();
int index=0;
int adminIndex=0;
int adminPos=-1;
int userIndex=0;
int userPos=-1;
// Scan every command for the two opt strings, recording the command
// index and the character offset of the last occurrence found.
for ( String command : commands) {
if (command != null) {
adminPos=command.indexOf("-Djava.net.preferIPv4Stack=true");
if (adminPos >= 0) adminIndex=index;
userPos=command.indexOf("-Xmx1024m");
if (userPos >= 0) userIndex=index;
}
index++;
}
// Fixed off-by-one: indexOf() returns 0 for a match at the start of the
// command, so presence must be tested with >= 0, not > 0.
assertTrue("AM admin command opts not in the commands.",adminPos >= 0);
assertTrue("AM user command opts not in the commands.",userPos >= 0);
if (adminIndex == userIndex) {
// Same command string: admin opts must come before user opts.
assertTrue("AM admin command opts is after user command opts.",adminPos < userPos);
}
else {
assertTrue("AM admin command opts is after user command opts.",adminIndex < userIndex);
}
}
APIUtilityVerifier EqualityVerifier
/**
 * Verifies {@link CompressionEmulationUtil#getUncompressedInputBytes(long, Configuration)}:
 * with input-compression emulation disabled the byte count passes through
 * unchanged, and with it enabled the count is scaled up by the configured
 * map-input compression ratio.
 */
@Test public void testComputeUncompressedInputBytes() {
  final long compressedBytes = 100000;
  final float ratio = 0.45F;
  Configuration conf = new Configuration();
  CompressionEmulationUtil.setMapInputCompressionEmulationRatio(conf, ratio);
  // Emulation off: the input size must be returned as-is.
  assertEquals(compressedBytes,
      CompressionEmulationUtil.getUncompressedInputBytes(compressedBytes, conf));
  // Emulation on: the input size must be scaled by 1/ratio.
  CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);
  assertEquals((long) (compressedBytes / ratio),
      CompressionEmulationUtil.getUncompressedInputBytes(compressedBytes, conf));
}
APIUtilityVerifier EqualityVerifier
/**
 * Tests that {@link FileQueue} identifies a compressed input file and
 * hands back uncompressed data when input-compression emulation is enabled:
 * writes a line through a gzip-compressing stream, then reads it back
 * through a FileQueue and compares.
 */
@Test public void testFileQueueDecompression() throws IOException {
JobConf conf=new JobConf();
FileSystem lfs=FileSystem.getLocal(conf);
String inputLine="Hi Hello!";
// Turn on compression emulation and gzip output so the write below is compressed.
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf,true);
org.apache.hadoop.mapred.FileOutputFormat.setCompressOutput(conf,true);
org.apache.hadoop.mapred.FileOutputFormat.setOutputCompressorClass(conf,GzipCodec.class);
// Scratch directory on the local FS; wiped up front for a clean run.
Path rootTempDir=new Path(System.getProperty("test.build.data","/tmp")).makeQualified(lfs.getUri(),lfs.getWorkingDirectory());
Path tempDir=new Path(rootTempDir,"TestFileQueueDecompression");
lfs.delete(tempDir,true);
Path compressedFile=new Path(tempDir,"test");
// getPossiblyCompressedOutputStream appends a ".gz" suffix to the actual file.
OutputStream out=CompressionEmulationUtil.getPossiblyCompressedOutputStream(compressedFile,conf);
BufferedWriter writer=new BufferedWriter(new OutputStreamWriter(out));
writer.write(inputLine);
writer.close();
compressedFile=compressedFile.suffix(".gz");
long fileSize=lfs.listStatus(compressedFile)[0].getLen();
// Read back through a FileQueue over a single-file split.
CombineFileSplit split=new CombineFileSplit(new Path[]{compressedFile},new long[]{fileSize});
FileQueue queue=new FileQueue(split,conf);
byte[] bytes=new byte[inputLine.getBytes().length];
queue.read(bytes);
queue.close();
String readLine=new String(bytes);
// Round trip must reproduce the original (uncompressed) line.
assertEquals("Compression/Decompression error",inputLine,readLine);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests {@link CompressionEmulationUtil#getPossiblyDecompressedInputStream(Path,Configuration,long)}
 * and {@link CompressionEmulationUtil#getPossiblyCompressedOutputStream(Path,Configuration)}:
 * a line written through the compressing stream must read back identically
 * through the decompressing stream.
 */
@Test public void testPossiblyCompressedDecompressedStreams() throws IOException {
  JobConf conf = new JobConf();
  FileSystem lfs = FileSystem.getLocal(conf);
  String inputLine = "Hi Hello!";
  // Enable compression emulation with gzip output.
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);
  conf.setBoolean(FileOutputFormat.COMPRESS, true);
  conf.setClass(FileOutputFormat.COMPRESS_CODEC, GzipCodec.class, CompressionCodec.class);
  // Scratch directory on the local FS; wiped up front for a clean run.
  Path rootTempDir = new Path(System.getProperty("test.build.data", "/tmp"))
      .makeQualified(lfs.getUri(), lfs.getWorkingDirectory());
  Path tempDir = new Path(rootTempDir, "TestPossiblyCompressedDecompressedStreams");
  lfs.delete(tempDir, true);
  Path compressedFile = new Path(tempDir, "test");
  OutputStream out = CompressionEmulationUtil.getPossiblyCompressedOutputStream(compressedFile, conf);
  BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out));
  // Close in finally so the stream is released even if the write throws
  // (the original code leaked it on failure).
  try {
    writer.write(inputLine);
  } finally {
    writer.close();
  }
  // The compressing stream writes to <name>.gz; read that file back.
  compressedFile = compressedFile.suffix(".gz");
  InputStream in = CompressionEmulationUtil.getPossiblyDecompressedInputStream(compressedFile, conf, 0);
  BufferedReader reader = new BufferedReader(new InputStreamReader(in));
  // Close in finally so a failed assertion does not leak the reader.
  try {
    String readLine = reader.readLine();
    assertEquals("Compression/Decompression error", inputLine, readLine);
  } finally {
    reader.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test {@link RandomTextDataMapper} via {@link CompressionEmulationUtil}:
 * runs the data-generation job with compression emulation enabled and
 * verifies the total (decompressed) word bytes cover the requested data
 * size, allowing at most one extra line of slack.
 */
@Test public void testRandomCompressedTextDataGenerator() throws Exception {
int wordSize=10;
int listSize=20;
long dataSize=10 * 1024 * 1024;
Configuration conf=new Configuration();
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf,true);
// Configure the random-text generator's vocabulary and target size.
conf.setInt(RandomTextDataGenerator.GRIDMIX_DATAGEN_RANDOMTEXT_LISTSIZE,listSize);
conf.setInt(RandomTextDataGenerator.GRIDMIX_DATAGEN_RANDOMTEXT_WORDSIZE,wordSize);
conf.setLong(GenerateData.GRIDMIX_GEN_BYTES,dataSize);
conf.set("mapreduce.job.hdfs-servers","");
FileSystem lfs=FileSystem.getLocal(conf);
// Scratch directory on the local FS; wiped up front for a clean run.
Path rootTempDir=new Path(System.getProperty("test.build.data","/tmp")).makeQualified(lfs.getUri(),lfs.getWorkingDirectory());
Path tempDir=new Path(rootTempDir,"TestRandomCompressedTextDataGenr");
lfs.delete(tempDir,true);
runDataGenJob(conf,tempDir);
// Walk every output file, decompressing on the fly, and total up the
// byte length of all whitespace-separated words.
FileStatus[] files=lfs.listStatus(tempDir,new Utils.OutputFileUtils.OutputFilesFilter());
long size=0;
long maxLineSize=0;
for ( FileStatus status : files) {
InputStream in=CompressionEmulationUtil.getPossiblyDecompressedInputStream(status.getPath(),conf,0);
BufferedReader reader=new BufferedReader(new InputStreamReader(in));
String line=reader.readLine();
if (line != null) {
// Track the longest first line seen; used as the slack bound below.
long lineSize=line.getBytes().length;
if (lineSize > maxLineSize) {
maxLineSize=lineSize;
}
while (line != null) {
for ( String word : line.split("\\s")) {
size+=word.getBytes().length;
}
line=reader.readLine();
}
}
reader.close();
}
// Generated data must reach the target, overshooting by at most one line.
assertTrue(size >= dataSize);
assertTrue(size <= dataSize + maxLineSize);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test compressible {@link GridmixRecord}: writes a record configured with
 * a target compression ratio through a gzip stream and checks that the
 * record round-trips at full size, that the on-disk file is smaller, and
 * that the observed compression ratio is near the configured one.
 */
@Test public void testCompressibleGridmixRecord() throws IOException {
JobConf conf=new JobConf();
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf,true);
FileSystem lfs=FileSystem.getLocal(conf);
int dataSize=1024 * 1024 * 10;
float ratio=0.357F;
// Scratch directory on the local FS; wiped up front for a clean run.
Path rootTempDir=new Path(System.getProperty("test.build.data","/tmp")).makeQualified(lfs.getUri(),lfs.getWorkingDirectory());
Path tempDir=new Path(rootTempDir,"TestPossiblyCompressibleGridmixRecord");
lfs.delete(tempDir,true);
// Record whose payload is generated to compress at roughly 'ratio'.
GridmixRecord record=new GridmixRecord(dataSize,0);
record.setCompressibility(true,ratio);
conf.setClass(FileOutputFormat.COMPRESS_CODEC,GzipCodec.class,CompressionCodec.class);
org.apache.hadoop.mapred.FileOutputFormat.setCompressOutput(conf,true);
Path recordFile=new Path(tempDir,"record");
OutputStream outStream=CompressionEmulationUtil.getPossiblyCompressedOutputStream(recordFile,conf);
DataOutputStream out=new DataOutputStream(outStream);
record.write(out);
// Closing 'out' also closes the wrapped stream; the second close is a no-op.
out.close();
outStream.close();
// The compressing stream writes to <name>.gz; read that file back.
Path actualRecordFile=recordFile.suffix(".gz");
InputStream in=CompressionEmulationUtil.getPossiblyDecompressedInputStream(actualRecordFile,conf,0);
long compressedFileSize=lfs.listStatus(actualRecordFile)[0].getLen();
GridmixRecord recordRead=new GridmixRecord();
recordRead.readFields(new DataInputStream(in));
assertEquals("Record size mismatch in a compressible GridmixRecord",dataSize,recordRead.getSize());
assertTrue("Failed to generate a compressible GridmixRecord",recordRead.getSize() > compressedFileSize);
// Observed ratio must match the configured one within a tolerance of 1.0
// after both are standardized (rounded) the same way.
float seenRatio=((float)compressedFileSize) / dataSize;
assertEquals(CompressionEmulationUtil.standardizeCompressionRatio(ratio),CompressionEmulationUtil.standardizeCompressionRatio(seenRatio),1.0D);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Checks the configuration switch for distributed-cache load emulation:
 * it must default to enabled and be disableable via
 * {@code DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE}.
 */
@Test(timeout=2000) public void testDistCacheEmulationConfigurability() throws IOException {
  Configuration jobConf = GridmixTestUtils.mrvl.getConfig();
  Path ioPath = new Path("testDistCacheEmulationConfigurability")
      .makeQualified(GridmixTestUtils.dfs.getUri(), GridmixTestUtils.dfs.getWorkingDirectory());
  FileSystem fs = FileSystem.get(jobConf);
  FileSystem.mkdirs(fs, ioPath, new FsPermission((short) 0777));
  // Default: emulation of dist-cache load must be on.
  dce = createDistributedCacheEmulator(jobConf, ioPath, false);
  assertTrue("Default configuration of "
      + DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE
      + " is wrong.", dce.shouldEmulateDistCacheLoad());
  // Explicitly disabling the property must turn emulation off.
  jobConf.setBoolean(DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE, false);
  dce = createDistributedCacheEmulator(jobConf, ioPath, false);
  assertFalse("Disabling of emulation of distributed cache load by setting "
      + DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE
      + " to false is not working.", dce.shouldEmulateDistCacheLoad());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Validate GenerateDistCacheData job if it creates dist cache files properly:
 * the job must be map-only, complete successfully, and produce files whose
 * sizes match the setup's expectations.
 * @throws Exception
 */
@Test(timeout=200000) public void testGenerateDistCacheData() throws Exception {
  final long[] sortedFileSizes = new long[5];
  final Configuration jobConf = runSetupGenerateDistCacheData(true, sortedFileSizes);
  GridmixJob dataGenJob = new GenerateDistCacheData(jobConf);
  Job job = dataGenJob.call();
  // Dist-cache data generation is a map-only job by design.
  assertEquals("Number of reduce tasks in GenerateDistCacheData is not 0.",
      0, job.getNumReduceTasks());
  assertTrue("GenerateDistCacheData job failed.", job.waitForCompletion(false));
  validateDistCacheData(jobConf, sortedFileSizes);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests {@link FilePool}: verifies the total bytes returned by
 * getInputFiles() with and without the minimum-file-size threshold, the
 * number of files selected, and a randomized partial-size request.
 */
@Test public void testPool() throws Exception {
final Random r=new Random();
final Configuration conf=new Configuration();
// Exclude files below 3 KB from the pool.
conf.setLong(FilePool.GRIDMIX_MIN_FILE,3 * 1024);
final FilePool pool=new FilePool(conf,base);
pool.refresh();
final ArrayList files=new ArrayList();
// NOTE(review): formula assumes the fixture creates NFILES files whose
// sizes follow the pattern that makes the pool total
// (NFILES/2*(NFILES/2+1)-6)*1024 with four files under the 3 KB cutoff;
// the fixture setup is outside this view — confirm against it.
final int expectedPoolSize=(NFILES / 2 * (NFILES / 2 + 1) - 6) * 1024;
assertEquals(expectedPoolSize,pool.getInputFiles(Long.MAX_VALUE,files));
assertEquals(NFILES - 4,files.size());
files.clear();
// Asking for exactly the pool size must return exactly the pool size.
assertEquals(expectedPoolSize,pool.getInputFiles(expectedPoolSize,files));
files.clear();
// A random partial request may overshoot by at most one file's worth.
final long rand=r.nextInt(expectedPoolSize);
assertTrue("Missed: " + rand,(NFILES / 2) * 1024 > rand - pool.getInputFiles(rand,files));
// Dropping the minimum-size threshold brings the small files back in.
conf.setLong(FilePool.GRIDMIX_MIN_FILE,0);
pool.refresh();
files.clear();
assertEquals((NFILES / 2 * (NFILES / 2 + 1)) * 1024,pool.getInputFiles(Long.MAX_VALUE,files));
}
APIUtilityVerifier EqualityVerifier
/**
 * Tests that {@link FileQueue} wraps around: after consuming all input
 * blocks once, further reads must replay the same data from the start.
 */
@Test public void testRepeat() throws Exception {
final Configuration conf=new Configuration();
// Full-block splits: every file read from offset 0 for BLOCK bytes.
Arrays.fill(loc,"");
Arrays.fill(start,0L);
Arrays.fill(len,BLOCK);
final ByteArrayOutputStream out=fillVerif();
final FileQueue q=new FileQueue(new CombineFileSplit(paths,start,len,loc),conf);
final byte[] verif=out.toByteArray();
final byte[] check=new byte[2 * NFILES * BLOCK];
// First pass: one full traversal must match the verification bytes.
q.read(check,0,NFILES * BLOCK);
assertArrayEquals(verif,Arrays.copyOf(check,NFILES * BLOCK));
// Second pass: reading twice the data must yield the sequence repeated.
final byte[] verif2=new byte[2 * NFILES * BLOCK];
System.arraycopy(verif,0,verif2,0,verif.length);
System.arraycopy(verif,0,verif2,verif.length,verif.length);
q.read(check,0,2 * NFILES * BLOCK);
assertArrayEquals(verif2,check);
}
APIUtilityVerifier EqualityVerifier
/**
 * Tests {@link FileQueue} with uneven splits: every other file starts at
 * mid-block with half length, and repeated reads must still replay the
 * same byte sequence.
 */
@Test public void testUneven() throws Exception {
final Configuration conf=new Configuration();
Arrays.fill(loc,"");
Arrays.fill(start,0L);
Arrays.fill(len,BLOCK);
// Shift every even-indexed file to start halfway through its block.
final int B2=BLOCK / 2;
for (int i=0; i < NFILES; i+=2) {
start[i]+=B2;
len[i]-=B2;
}
final FileQueue q=new FileQueue(new CombineFileSplit(paths,start,len,loc),conf);
// fillVerif() is expected to honor the adjusted start/len arrays.
final ByteArrayOutputStream out=fillVerif();
final byte[] verif=out.toByteArray();
// Total bytes: half the files contribute BLOCK, the other half B2.
final byte[] check=new byte[NFILES / 2 * BLOCK + NFILES / 2 * B2];
q.read(check,0,verif.length);
assertArrayEquals(verif,Arrays.copyOf(check,verif.length));
// A second full read must wrap around and replay identical data.
q.read(check,0,verif.length);
assertArrayEquals(verif,Arrays.copyOf(check,verif.length));
}
APIUtilityVerifier BranchVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests {@link SerialJobFactory}'s reader thread: no jobs may be submitted
 * before the start latch is released, and after release the trace's jobs
 * (two, per the wordcount2.json fixture) must all be submitted.
 */
@Test(timeout=120000) public void testSerialReaderThread() throws Exception {
Configuration conf=new Configuration();
File fin=new File("src" + File.separator + "test"+ File.separator+ "resources"+ File.separator+ "data"+ File.separator+ "wordcount2.json");
JobStoryProducer jobProducer=new ZombieJobProducer(new Path(fin.getAbsolutePath()),null,conf);
// Latch gating the factory: jobs must not flow until it is counted down.
CountDownLatch startFlag=new CountDownLatch(1);
UserResolver resolver=new SubmitterUserResolver();
FakeJobSubmitter submitter=new FakeJobSubmitter();
File ws=new File("target" + File.separator + this.getClass().getName());
if (!ws.exists()) {
Assert.assertTrue(ws.mkdirs());
}
SerialJobFactory jobFactory=new SerialJobFactory(submitter,jobProducer,new Path(ws.getAbsolutePath()),conf,startFlag,resolver);
Path ioPath=new Path(ws.getAbsolutePath());
jobFactory.setDistCacheEmulator(new DistributedCacheEmulator(conf,ioPath));
Thread test=jobFactory.createReaderThread();
test.start();
// Give the reader a moment; it must still be blocked on the latch.
Thread.sleep(1000);
assertEquals(0,submitter.getJobs().size());
// Release the latch, then pump update(null) until the reader finishes
// (serial policy waits for each job's completion signal).
startFlag.countDown();
while (test.isAlive()) {
Thread.sleep(1000);
jobFactory.update(null);
}
// The fixture trace contains two jobs; both must have been submitted.
assertEquals(2,submitter.getJobs().size());
}
APIUtilityVerifier EqualityVerifier
/**
 * Runs {@link LoadJob.LoadMapper} against fake reader/writer/committer
 * wiring with compression emulation enabled and checks the number of
 * records the mapper emits.
 */
@SuppressWarnings({"rawtypes","unchecked"}) @Test(timeout=10000) public void testLoadMapper() throws Exception {
Configuration conf=new Configuration();
conf.setInt(JobContext.NUM_REDUCES,2);
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,true);
// Assemble a MapContext entirely from test doubles.
TaskAttemptID taskId=new TaskAttemptID();
RecordReader reader=new FakeRecordReader();
LoadRecordGkGrWriter writer=new LoadRecordGkGrWriter();
OutputCommitter committer=new CustomOutputCommitter();
StatusReporter reporter=new TaskAttemptContextImpl.DummyReporter();
LoadSplit split=getLoadSplit();
MapContext mapContext=new MapContextImpl(conf,taskId,reader,writer,committer,reporter,split);
Context ctx=new WrappedMapper().getMapContext(mapContext);
reader.initialize(split,ctx);
// Re-apply the compression settings on the context's own configuration,
// which is what the mapper actually consults at run time.
ctx.getConfiguration().setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,true);
CompressionEmulationUtil.setCompressionEmulationEnabled(ctx.getConfiguration(),true);
LoadJob.LoadMapper mapper=new LoadJob.LoadMapper();
mapper.run(ctx);
// The fixture split is expected to yield exactly two output records.
Map data=writer.getData();
assertEquals(2,data.size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Exercises {@link LoadJob.LoadSortComparator} on hand-serialized key
 * bytes, covering the equal, smaller, larger, and shifted-offset cases.
 */
@Test(timeout=3000) public void testLoadJobLoadSortComparator() throws Exception {
  LoadJob.LoadSortComparator comparator = new LoadJob.LoadSortComparator();
  // Build a shared VInt byte sequence used for both comparison operands.
  ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  DataOutputStream out = new DataOutputStream(buffer);
  WritableUtils.writeVInt(out, 2);
  WritableUtils.writeVInt(out, 1);
  WritableUtils.writeVInt(out, 4);
  WritableUtils.writeVInt(out, 7);
  WritableUtils.writeVInt(out, 4);
  byte[] left = buffer.toByteArray();
  byte[] right = buffer.toByteArray();
  // Identical buffers compare as equal.
  assertEquals(0, comparator.compare(left, 0, 1, right, 0, 1));
  // Bumping a byte in the right operand flips the ordering.
  right[2] = 5;
  assertEquals(-1, comparator.compare(left, 0, 1, right, 0, 1));
  right[2] = 2;
  assertEquals(2, comparator.compare(left, 0, 1, right, 0, 1));
  // Same bytes but a shifted right-hand offset still orders left first.
  right[2] = 4;
  assertEquals(1, comparator.compare(left, 0, 1, right, 1, 1));
}
APIUtilityVerifier EqualityVerifier
/**
 * Runs {@link LoadJob.LoadReducer} through a single reduce pass wired up
 * with fake iterator/writer/committer doubles and verifies the recorded
 * counter values and the size of the emitted {@link GridmixRecord}.
 */
@Test(timeout=3000) public void testLoadJobLoadReducer() throws Exception {
  LoadJob.LoadReducer test = new LoadJob.LoadReducer();
  Configuration conf = new Configuration();
  conf.setInt(JobContext.NUM_REDUCES, 2);
  // Enable compression emulation once (the original code called
  // setCompressionEmulationEnabled(conf, true) twice in a row; the
  // redundant duplicate has been removed) plus compressed job/map output.
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(FileOutputFormat.COMPRESS, true);
  conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
  // Assemble a ReduceContext entirely from test doubles.
  TaskAttemptID taskid = new TaskAttemptID();
  RawKeyValueIterator input = new FakeRawKeyValueIterator();
  Counter counter = new GenericCounter();
  Counter inputValueCounter = new GenericCounter();
  LoadRecordWriter output = new LoadRecordWriter();
  OutputCommitter committer = new CustomOutputCommitter();
  StatusReporter reporter = new DummyReporter();
  RawComparator comparator = new FakeRawComparator();
  ReduceContext reduceContext = new ReduceContextImpl(conf, taskid, input, counter,
      inputValueCounter, output, committer, reporter, comparator,
      GridmixKey.class, GridmixRecord.class);
  // Position the context on the first key/value before running the reducer.
  reduceContext.nextKeyValue();
  org.apache.hadoop.mapreduce.Reducer.Context context =
      new WrappedReducer().getReducerContext(reduceContext);
  test.run(context);
  // Counter values and output size expected from the fake input fixture.
  assertEquals(9, counter.getValue());
  assertEquals(10, inputValueCounter.getValue());
  assertEquals(1, output.getData().size());
  GridmixRecord record = output.getData().values().iterator().next();
  assertEquals(1593, record.getSize());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Exercises {@link GridmixJob.SpecGroupingComparator} on both raw
 * serialized bytes and {@link GridmixKey} objects, checking the ordering
 * of DATA vs. REDUCE_SPEC keys.
 */
@Test(timeout=3000) public void testGridmixJobSpecGroupingComparator() throws Exception {
GridmixJob.SpecGroupingComparator test=new GridmixJob.SpecGroupingComparator();
// Build a shared VInt byte sequence for the raw comparisons.
ByteArrayOutputStream data=new ByteArrayOutputStream();
DataOutputStream dos=new DataOutputStream(data);
WritableUtils.writeVInt(dos,2);
WritableUtils.writeVInt(dos,1);
WritableUtils.writeVInt(dos,0);
WritableUtils.writeVInt(dos,7);
WritableUtils.writeVInt(dos,4);
byte[] b1=data.toByteArray();
byte[] b2=data.toByteArray();
// Identical buffers compare as equal.
assertEquals(0,test.compare(b1,0,1,b2,0,1));
b2[2]=1;
assertEquals(-1,test.compare(b1,0,1,b2,0,1));
// NOTE(review): the next two lines repeat the previous mutation and
// assertion verbatim (b2[2] is already 1); possibly a different byte or
// value was intended — confirm against the comparator's spec.
b2[2]=1;
assertEquals(-1,test.compare(b1,0,1,b2,0,1));
// Object comparisons: REDUCE_SPEC sorts before DATA; equal-type keys
// compare by their payload.
assertEquals(0,test.compare(new GridmixKey(GridmixKey.DATA,100,2),new GridmixKey(GridmixKey.DATA,100,2)));
assertEquals(-1,test.compare(new GridmixKey(GridmixKey.REDUCE_SPEC,100,2),new GridmixKey(GridmixKey.DATA,100,2)));
assertEquals(1,test.compare(new GridmixKey(GridmixKey.DATA,100,2),new GridmixKey(GridmixKey.REDUCE_SPEC,100,2)));
assertEquals(2,test.compare(new GridmixKey(GridmixKey.DATA,102,2),new GridmixKey(GridmixKey.DATA,100,2)));
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Invokes {@link DebugGridmix#main} with no arguments and verifies that it
 * exits (captured via {@link ExitUtil}) and prints the usage text to
 * stderr.
 */
@Test(timeout=100000) public void testMain() throws Exception {
  SecurityManager securityManager = System.getSecurityManager();
  final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  final PrintStream out = new PrintStream(bytes);
  // Save the ORIGINAL stderr: the test redirects System.err below, so it
  // must restore System.err from System.err. (The original code saved
  // System.out and restored it into System.err, permanently pointing
  // stderr at stdout for every test that ran afterwards.)
  final PrintStream oldErr = System.err;
  System.setErr(out);
  ExitUtil.disableSystemExit();
  try {
    String[] argv = new String[0];
    DebugGridmix.main(argv);
  }
  catch (ExitUtil.ExitException e) {
    // disableSystemExit() converts System.exit into this exception.
    assertEquals("ExitException", e.getMessage());
    ExitUtil.resetFirstExitException();
  }
  finally {
    System.setErr(oldErr);
    System.setSecurityManager(securityManager);
  }
  // The usage message must have been written to the captured stderr.
  String print = bytes.toString();
  assertTrue(print.contains("Usage: gridmix [-generate ] [-users URI] [-Dname=value ...] "));
  assertTrue(print.contains("e.g. gridmix -generate 100m foo -"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test {@link DataStatistics}: direct construction, and
 * {@link GenerateData#publishDataStatistics} over empty, uncompressed,
 * and gzip-suffixed input directories with compression emulation toggled.
 */
@Test public void testDataStatistics() throws Exception {
// Directly constructed statistics report back exactly what was given.
DataStatistics stats=new DataStatistics(10,2,true);
assertEquals("Data size mismatch",10,stats.getDataSize());
assertEquals("Num files mismatch",2,stats.getNumFiles());
assertTrue("Compression configuration mismatch",stats.isDataCompressed());
stats=new DataStatistics(100,5,false);
assertEquals("Data size mismatch",100,stats.getDataSize());
assertEquals("Num files mismatch",5,stats.getNumFiles());
assertFalse("Compression configuration mismatch",stats.isDataCompressed());
// Prepare an empty input directory for publishDataStatistics().
Configuration conf=new Configuration();
Path rootTempDir=new Path(System.getProperty("test.build.data","/tmp"));
Path testDir=new Path(rootTempDir,"testDataStatistics");
FileSystem fs=testDir.getFileSystem(conf);
fs.delete(testDir,true);
Path testInputDir=new Path(testDir,"test");
fs.mkdirs(testInputDir);
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
// Compression emulation on with no compressed input: publishing must
// fail. Boolean (boxed) so null can flag "neither branch was taken".
Boolean failed=null;
try {
GenerateData.publishDataStatistics(testInputDir,1024L,conf);
failed=false;
}
catch ( RuntimeException e) {
failed=true;
}
assertNotNull("Expected failure!",failed);
assertTrue("Compression data publishing error",failed);
// Emulation off over an empty directory: zeroed statistics.
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,false);
stats=GenerateData.publishDataStatistics(testInputDir,1024L,conf);
assertEquals("Data size mismatch",0,stats.getDataSize());
assertEquals("Num files mismatch",0,stats.getNumFiles());
assertFalse("Compression configuration mismatch",stats.isDataCompressed());
// One plain (uncompressed) input file: size and count are picked up.
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,false);
Path inputDataFile=new Path(testInputDir,"test");
long size=UtilsForTests.createTmpFileDFS(fs,inputDataFile,FsPermission.createImmutable((short)777),"hi hello bye").size();
stats=GenerateData.publishDataStatistics(testInputDir,-1,conf);
assertEquals("Data size mismatch",size,stats.getDataSize());
assertEquals("Num files mismatch",1,stats.getNumFiles());
assertFalse("Compression configuration mismatch",stats.isDataCompressed());
// Emulation on with only an uncompressed file present: must fail again.
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
failed=null;
try {
GenerateData.publishDataStatistics(testInputDir,1234L,conf);
failed=false;
}
catch ( RuntimeException e) {
failed=true;
}
assertNotNull("Expected failure!",failed);
assertTrue("Compression data publishing error",failed);
// Replace the plain file with a .gz-suffixed one.
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,false);
fs.delete(inputDataFile,false);
inputDataFile=new Path(testInputDir,"test.gz");
size=UtilsForTests.createTmpFileDFS(fs,inputDataFile,FsPermission.createImmutable((short)777),"hi hello").size();
// Emulation off: the .gz file is counted but not flagged as compressed.
stats=GenerateData.publishDataStatistics(testInputDir,1234L,conf);
assertEquals("Data size mismatch",size,stats.getDataSize());
assertEquals("Num files mismatch",1,stats.getNumFiles());
assertFalse("Compression configuration mismatch",stats.isDataCompressed());
// Emulation on: the same .gz file now reports as compressed data.
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
stats=GenerateData.publishDataStatistics(testInputDir,1234L,conf);
assertEquals("Data size mismatch",size,stats.getDataSize());
assertEquals("Num files mismatch",1,stats.getNumFiles());
assertTrue("Compression configuration mismatch",stats.isDataCompressed());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test {@link ExecutionSummarizer}: command-line capture, start/simulation
 * timestamps, per-job statistics accumulation via update(), and the
 * finalize() bookkeeping (trace signature, data-size rendering, resolver
 * and policy reporting).
 */
@Test @SuppressWarnings({"unchecked","rawtypes"}) public void testExecutionSummarizer() throws IOException {
  Configuration conf = new Configuration();
  // Default-constructed summarizer has no command line recorded.
  ExecutionSummarizer es = new ExecutionSummarizer();
  assertEquals("ExecutionSummarizer init failed", Summarizer.NA, es.getCommandLineArgsString());
  long startTime = System.currentTimeMillis();
  String[] initArgs = new String[]{"-Xmx20m", "-Dtest.args='test'"};
  es = new ExecutionSummarizer(initArgs);
  assertEquals("ExecutionSummarizer init failed", "-Xmx20m -Dtest.args='test'",
      es.getCommandLineArgsString());
  // Start time must fall between construction bounds.
  assertTrue("Start time mismatch", es.getStartTime() >= startTime);
  assertTrue("Start time mismatch", es.getStartTime() <= System.currentTimeMillis());
  // update(null) is a no-op: all aggregates stay zero.
  es.update(null);
  assertEquals("ExecutionSummarizer init failed", 0, es.getSimulationStartTime());
  testExecutionSummarizer(0, 0, 0, 0, 0, 0, 0, es);
  long simStartTime = System.currentTimeMillis();
  es.start(null);
  assertTrue("Simulation start time mismatch", es.getSimulationStartTime() >= simStartTime);
  assertTrue("Simulation start time mismatch",
      es.getSimulationStartTime() <= System.currentTimeMillis());
  // Feed fake job stats covering all four success/failure combinations and
  // check the running aggregates after each update.
  JobStats stats = generateFakeJobStats(1, 10, true, false);
  es.update(stats);
  testExecutionSummarizer(1, 10, 0, 1, 1, 0, 0, es);
  stats = generateFakeJobStats(5, 1, false, false);
  es.update(stats);
  testExecutionSummarizer(6, 11, 0, 2, 1, 1, 0, es);
  stats = generateFakeJobStats(1, 1, true, true);
  es.update(stats);
  testExecutionSummarizer(7, 12, 0, 3, 1, 1, 1, es);
  stats = generateFakeJobStats(2, 2, false, true);
  es.update(stats);
  testExecutionSummarizer(9, 14, 0, 4, 1, 1, 2, es);
  // finalize() bookkeeping against a real (empty) trace file on local FS.
  JobFactory factory = new FakeJobFactory(conf);
  factory.numJobsInTrace = 3;
  Path rootTempDir = new Path(System.getProperty("test.build.data", "/tmp"));
  Path testDir = new Path(rootTempDir, "testGridmixSummary");
  Path testTraceFile = new Path(testDir, "test-trace.json");
  FileSystem fs = FileSystem.getLocal(conf);
  fs.create(testTraceFile).close();
  UserResolver resolver = new RoundRobinUserResolver();
  DataStatistics dataStats = new DataStatistics(100, 2, true);
  String policy = GridmixJobSubmissionPolicy.REPLAY.name();
  conf.set(GridmixJobSubmissionPolicy.JOB_SUBMISSION_POLICY, policy);
  es.finalize(factory, testTraceFile.toString(), 1024L, resolver, dataStats, conf);
  assertEquals("Mismtach in num jobs in trace", 3, es.getNumJobsInTrace());
  String tid = ExecutionSummarizer.getTraceSignature(testTraceFile.toString());
  assertEquals("Mismatch in trace signature", tid, es.getInputTraceSignature());
  Path qPath = fs.makeQualified(testTraceFile);
  assertEquals("Mismatch in trace filename", qPath.toString(), es.getInputTraceLocation());
  assertEquals("Mismatch in expected data size", "1 K", es.getExpectedDataSize());
  assertEquals("Mismatch in input data statistics",
      ExecutionSummarizer.stringifyDataStatistics(dataStats), es.getInputDataStatistics());
  assertEquals("Mismatch in user resolver", resolver.getClass().getName(), es.getUserResolver());
  assertEquals("Mismatch in policy", policy, es.getJobSubmissionPolicy());
  // 10 GiB must render as "10 G".
  es.finalize(factory, testTraceFile.toString(), 1024 * 1024 * 1024 * 10L, resolver, dataStats, conf);
  assertEquals("Mismatch in expected data size", "10 G", es.getExpectedDataSize());
  // Recreate the trace file after a pause so its modification time — and
  // therefore its signature — changes.
  fs.delete(testTraceFile, false);
  try {
    Thread.sleep(1000);
  }
  catch (InterruptedException ie) {
    // Restore the interrupt status instead of silently swallowing it
    // (the original code left this catch block empty).
    Thread.currentThread().interrupt();
  }
  fs.create(testTraceFile).close();
  es.finalize(factory, testTraceFile.toString(), 0L, resolver, dataStats, conf);
  // Zero expected bytes renders as NA, and the signature must differ now.
  assertEquals("Mismatch in trace data size", Summarizer.NA, es.getExpectedDataSize());
  assertFalse("Mismatch in trace signature", tid.equals(es.getInputTraceSignature()));
  tid = ExecutionSummarizer.getTraceSignature(testTraceFile.toString());
  assertEquals("Mismatch in trace signature", tid, es.getInputTraceSignature());
  // A different trace file yields yet another signature.
  testTraceFile = new Path(testDir, "test-trace2.json");
  fs.create(testTraceFile).close();
  es.finalize(factory, testTraceFile.toString(), 0L, resolver, dataStats, conf);
  assertFalse("Mismatch in trace signature", tid.equals(es.getInputTraceSignature()));
  tid = ExecutionSummarizer.getTraceSignature(testTraceFile.toString());
  assertEquals("Mismatch in trace signature", tid, es.getInputTraceSignature());
  // A "-" trace (stdin) has no signature or location.
  es.finalize(factory, "-", 0L, resolver, dataStats, conf);
  assertEquals("Mismatch in trace signature", Summarizer.NA, es.getInputTraceSignature());
  assertEquals("Mismatch in trace file location", Summarizer.NA, es.getInputTraceLocation());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test Pseudo Local File System methods like getFileStatus(), create(),
 * open(), exists() for valid file paths and invalid file paths.
 * @throws IOException
 */
@Test public void testPseudoLocalFsFileNames() throws IOException {
  PseudoLocalFs pfs = new PseudoLocalFs();
  Configuration conf = new Configuration();
  conf.setClass("fs.pseudo.impl", PseudoLocalFs.class, FileSystem.class);
  Path path = new Path("pseudo:///myPsedoFile.1234");
  FileSystem testFs = path.getFileSystem(conf);
  assertEquals("Failed to obtain a pseudo local file system object from path",
      pfs.getUri().getScheme(), testFs.getUri().getScheme());
  // Paths that violate the pseudo:///<name>.<size> convention (wrong
  // scheme, missing size suffix, non-numeric suffix) must be rejected by
  // every operation.
  String[] invalidNames = {
      "file:///myPsedoFile.12345",
      "pseudo:///myPsedoFile",
      "pseudo:///myPsedoFile.txt"
  };
  for (String name : invalidNames) {
    validateAllFileOps(pfs, new Path(name), false);
  }
  // A generated path encodes its size and must pass every operation.
  long fileSize = 231456;
  path = PseudoLocalFs.generateFilePath("my.Psedo.File", fileSize);
  assertEquals("generateFilePath() failed.", fileSize, pfs.validateFileNameFormat(path));
  validateAllFileOps(pfs, path, true);
  // A relative name qualified against the pseudo FS is also valid.
  path = new Path("myPsedoFile.1237");
  path = path.makeQualified(pfs);
  validateAllFileOps(pfs, path, true);
}

/**
 * Runs the four per-path validations (getFileStatus, create, open, exists)
 * with the same expected outcome.
 */
private void validateAllFileOps(PseudoLocalFs pfs, Path path, boolean shouldSucceed)
    throws IOException {
  validateGetFileStatus(pfs, path, shouldSucceed);
  validateCreate(pfs, path, shouldSucceed);
  validateOpen(pfs, path, shouldSucceed);
  validateExists(pfs, path, shouldSucceed);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test if a file on PseudoLocalFs of a specific size can be opened and read.
 * Validate the size of the data read.
 * Test the read methods of {@link PseudoLocalFs.RandomInputStream}.
 * @throws Exception
 */
@Test public void testPseudoLocalFsFileSize() throws Exception {
  final long expectedSize = 10000;
  Path path = PseudoLocalFs.generateFilePath("myPsedoFile", expectedSize);
  PseudoLocalFs pfs = new PseudoLocalFs();
  pfs.create(path);
  // Byte-at-a-time read: count until EOF.
  InputStream in = pfs.open(path, 0);
  long seen = 0;
  for (int b = in.read(); b >= 0; b = in.read()) {
    ++seen;
  }
  in.close();
  assertEquals("File size mismatch with read().", expectedSize, seen);
  // Bulk read into a buffer: accumulate returned lengths until EOF.
  in = pfs.open(path, 0);
  seen = 0;
  byte[] buffer = new byte[1024];
  for (int n = in.read(buffer); n >= 0; n = in.read(buffer)) {
    seen += n;
  }
  assertEquals("File size mismatch with read(byte[]).", expectedSize, seen);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests the SLEEPJOB_MAPTASK_ONLY switch: every sleep job generated from
 * the debug producer must end up with zero reduce tasks.
 */
@Test public void testMapTasksOnlySleepJobs() throws Exception {
Configuration configuration=GridmixTestUtils.mrvl.getConfig();
// Produce 5 synthetic job stories to convert into sleep jobs.
DebugJobProducer jobProducer=new DebugJobProducer(5,configuration);
configuration.setBoolean(SleepJob.SLEEPJOB_MAPTASK_ONLY,true);
UserGroupInformation ugi=UserGroupInformation.getLoginUser();
JobStory story;
int seq=1;
while ((story=jobProducer.getNextJob()) != null) {
GridmixJob gridmixJob=JobCreator.SLEEPJOB.createGridmixJob(configuration,0,story,new Path("ignored"),ugi,seq++);
gridmixJob.buildSplits(null);
Job job=gridmixJob.call();
// Map-task-only mode: reduces must be suppressed on every job.
assertEquals(0,job.getNumReduceTasks());
}
jobProducer.close();
// seq starts at 1 and is incremented 5 times, so it ends at 6.
assertEquals(6,seq);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * test PipesMapRunner test the transfer data from reader:
 * runs the pipes map runner against a stub C++-side application command and
 * verifies (via captured stdout) the protocol version, key/value classes,
 * and the record values that flowed through.
 * @throws Exception
 */
@Test public void testRunner() throws Exception {
// Token password files are recreated per test; clean any leftovers.
File[] psw=cleanTokenPasswordFile();
try {
RecordReader rReader=new ReaderPipesMapRunner();
JobConf conf=new JobConf();
conf.set(Submitter.IS_JAVA_RR,"true");
conf.set(MRJobConfig.TASK_ATTEMPT_ID,taskName);
CombineOutputCollector output=new CombineOutputCollector(new Counters.Counter(),new Progress());
FileSystem fs=new RawLocalFileSystem();
fs.setConf(conf);
Writer wr=new Writer(conf,fs.create(new Path(workSpace + File.separator + "outfile")),IntWritable.class,Text.class,null,null,true);
output.setWriter(wr);
// The stub class acts as the external pipes binary under test.
File fCommand=getFileCommand("org.apache.hadoop.mapred.pipes.PipeApplicationRunnableStub");
conf.set(MRJobConfig.CACHE_LOCALFILES,fCommand.getAbsolutePath());
// A job token is required for the pipes handshake.
Token token=new Token("user".getBytes(),"password".getBytes(),new Text("kind"),new Text("service"));
TokenCache.setJobToken(token,conf.getCredentials());
conf.setBoolean(MRJobConfig.SKIP_RECORDS,true);
TestTaskReporter reporter=new TestTaskReporter();
PipesMapRunner runner=new PipesMapRunner();
// Redirect stdout so the stub's protocol chatter can be inspected.
initStdOut(conf);
runner.configure(conf);
runner.run(rReader,output,reporter);
String stdOut=readStdOut(conf);
// The stub echoes the handshake and each record it received.
assertTrue(stdOut.contains("CURRENT_PROTOCOL_VERSION:0"));
assertTrue(stdOut.contains("Key class:org.apache.hadoop.io.FloatWritable"));
assertTrue(stdOut.contains("Value class:org.apache.hadoop.io.NullWritable"));
assertTrue(stdOut.contains("value:0.0"));
assertTrue(stdOut.contains("value:9.0"));
}
 finally {
// Best-effort cleanup of the generated password files on JVM exit.
if (psw != null) {
for ( File file : psw) {
file.deleteOnExit();
}
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
* test org.apache.hadoop.mapred.pipes.Application
* test a internal functions: MessageType.REGISTER_COUNTER, INCREMENT_COUNTER, STATUS, PROGRESS...
* @throws Throwable
*/
@Test public void testApplication() throws Throwable {
JobConf conf=new JobConf();
RecordReader rReader=new Reader();
File fCommand=getFileCommand("org.apache.hadoop.mapred.pipes.PipeApplicationStub");
TestTaskReporter reporter=new TestTaskReporter();
File[] psw=cleanTokenPasswordFile();
try {
conf.set(MRJobConfig.TASK_ATTEMPT_ID,taskName);
conf.set(MRJobConfig.CACHE_LOCALFILES,fCommand.getAbsolutePath());
Token token=new Token("user".getBytes(),"password".getBytes(),new Text("kind"),new Text("service"));
TokenCache.setJobToken(token,conf.getCredentials());
FakeCollector output=new FakeCollector(new Counters.Counter(),new Progress());
FileSystem fs=new RawLocalFileSystem();
fs.setConf(conf);
Writer wr=new Writer(conf,fs.create(new Path(workSpace.getAbsolutePath() + File.separator + "outfile")),IntWritable.class,Text.class,null,null,true);
output.setWriter(wr);
conf.set(Submitter.PRESERVE_COMMANDFILE,"true");
initStdOut(conf);
Application,Writable,IntWritable,Text> application=new Application,Writable,IntWritable,Text>(conf,rReader,output,reporter,IntWritable.class,Text.class);
application.getDownlink().flush();
application.getDownlink().mapItem(new IntWritable(3),new Text("txt"));
application.getDownlink().flush();
application.waitForFinish();
wr.close();
String stdOut=readStdOut(conf);
assertTrue(stdOut.contains("key:3"));
assertTrue(stdOut.contains("value:txt"));
assertEquals(1.0,reporter.getProgress(),0.01);
assertNotNull(reporter.getCounter("group","name"));
assertEquals(reporter.getStatus(),"PROGRESS");
stdOut=readFile(new File(workSpace.getAbsolutePath() + File.separator + "outfile"));
assertEquals(0.55f,rReader.getProgress(),0.001);
application.getDownlink().close();
Entry entry=output.getCollect().entrySet().iterator().next();
assertEquals(123,entry.getKey().get());
assertEquals("value",entry.getValue().toString());
try {
application.abort(new Throwable());
fail();
}
catch ( IOException e) {
assertEquals("pipe child exception",e.getMessage());
}
}
finally {
if (psw != null) {
for ( File file : psw) {
file.deleteOnExit();
}
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * test org.apache.hadoop.mapred.pipes.PipesReducer
 * test the transfer of data: key and value
 * @throws Exception
 */
@Test public void testPipesReduser() throws Exception {
// NOTE(review): method name typo ("Reduser") kept -- renaming a @Test method
// is harmless but would churn test history.
File[] psw=cleanTokenPasswordFile();
JobConf conf=new JobConf();
try {
Token token=new Token("user".getBytes(),"password".getBytes(),new Text("kind"),new Text("service"));
TokenCache.setJobToken(token,conf.getCredentials());
// The stub class is run as the external pipes process.
File fCommand=getFileCommand("org.apache.hadoop.mapred.pipes.PipeReducerStub");
conf.set(MRJobConfig.CACHE_LOCALFILES,fCommand.getAbsolutePath());
PipesReducer reducer=new PipesReducer();
reducer.configure(conf);
BooleanWritable bw=new BooleanWritable(true);
conf.set(MRJobConfig.TASK_ATTEMPT_ID,taskName);
// Capture the child's stdout so the echoed key/values can be asserted on.
initStdOut(conf);
conf.setBoolean(MRJobConfig.SKIP_RECORDS,true);
CombineOutputCollector output=new CombineOutputCollector(new Counters.Counter(),new Progress());
Reporter reporter=new TestTaskReporter();
// One key with three values exercises the iterator path of reduce().
List texts=new ArrayList();
texts.add(new Text("first"));
texts.add(new Text("second"));
texts.add(new Text("third"));
reducer.reduce(bw,texts.iterator(),output,reporter);
reducer.close();
String stdOut=readStdOut(conf);
// The stub prints every key and value it receives.
assertTrue(stdOut.contains("reducer key :true"));
assertTrue(stdOut.contains("reduce value :first"));
assertTrue(stdOut.contains("reduce value :second"));
assertTrue(stdOut.contains("reduce value :third"));
}
finally {
// Best-effort cleanup of the token files created above.
if (psw != null) {
for ( File file : psw) {
file.deleteOnExit();
}
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify counter value works
 */
@Test public void testCounterValue() {
  // Exercise setValue() and increment() against an independently tracked
  // running total, over many randomized rounds.
  final int rounds = 100;
  final int incrementsPerRound = 10;
  final Random rng = new Random();
  for (int round = 0; round < rounds; round++) {
    long expected = rng.nextInt();
    Counter counter = new Counters().findCounter("test", "foo");
    counter.setValue(expected);
    assertEquals("Counter value is not initialized correctly", expected, counter.getValue());
    // A burst of random (possibly negative) increments must track exactly.
    for (int step = 0; step < incrementsPerRound; step++) {
      int delta = rng.nextInt();
      counter.increment(delta);
      expected += delta;
      assertEquals("Counter value is not incremented correctly", expected, counter.getValue());
    }
    // setValue() must overwrite the accumulated value.
    expected = rng.nextInt();
    counter.setValue(expected);
    assertEquals("Counter value is not set correctly", expected, counter.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier
// Job.toString() must complete (non-null) for a FAILED job, using only
// mocked cluster/client interactions -- no real cluster involved.
@Test public void testJobToString() throws IOException, InterruptedException {
Cluster cluster=mock(Cluster.class);
ClientProtocol client=mock(ClientProtocol.class);
when(cluster.getClient()).thenReturn(client);
JobID jobid=new JobID("1014873536921",6);
JobStatus status=new JobStatus(jobid,0.0f,0.0f,0.0f,0.0f,State.FAILED,JobPriority.NORMAL,"root","TestJobToString","job file","tracking url");
when(client.getJobStatus(jobid)).thenReturn(status);
// Empty reports/events: toString() must tolerate a job with no tasks.
when(client.getTaskReports(jobid,TaskType.MAP)).thenReturn(new TaskReport[0]);
when(client.getTaskReports(jobid,TaskType.REDUCE)).thenReturn(new TaskReport[0]);
when(client.getTaskCompletionEvents(jobid,0,10)).thenReturn(new TaskCompletionEvent[0]);
Job job=Job.getInstance(cluster,status,new JobConf());
Assert.assertNotNull(job.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Drives Job.monitorAndPrintJob() with mocked statuses (RUNNING, then
// SUCCEEDED) and scans the captured log output for the uber-mode, 100%
// progress, and completion lines.
// NOTE(review): relies on fields `job` and `clientProtocol` set up outside
// this view -- confirm their wiring in the enclosing class.
@Test public void testJobMonitorAndPrint() throws Exception {
JobStatus jobStatus_1=new JobStatus(new JobID("job_000",1),1f,0.1f,0.1f,0f,State.RUNNING,JobPriority.HIGH,"tmp-user","tmp-jobname","tmp-queue","tmp-jobfile","tmp-url",true);
JobStatus jobStatus_2=new JobStatus(new JobID("job_000",1),1f,1f,1f,1f,State.SUCCEEDED,JobPriority.HIGH,"tmp-user","tmp-jobname","tmp-queue","tmp-jobfile","tmp-url",true);
doAnswer(new Answer(){
@Override public TaskCompletionEvent[] answer( InvocationOnMock invocation) throws Throwable {
return new TaskCompletionEvent[0];
}
}
).when(job).getTaskCompletionEvents(anyInt(),anyInt());
doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class));
when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1,jobStatus_2);
// Attach a capturing appender to the Job logger only for the duration of
// the monitored call, then detach it.
Layout layout=Logger.getRootLogger().getAppender("stdout").getLayout();
ByteArrayOutputStream os=new ByteArrayOutputStream();
WriterAppender appender=new WriterAppender(layout,os);
appender.setThreshold(Level.ALL);
Logger qlogger=Logger.getLogger(Job.class);
qlogger.addAppender(appender);
job.monitorAndPrintJob();
qlogger.removeAppender(appender);
LineNumberReader r=new LineNumberReader(new StringReader(os.toString()));
String line;
boolean foundHundred=false;
boolean foundComplete=false;
boolean foundUber=false;
String uberModeMatch="uber mode : true";
String progressMatch="map 100% reduce 100%";
String completionMatch="completed successfully";
// Scan until the 100% progress line; uber-mode may appear on any earlier line.
while ((line=r.readLine()) != null) {
if (line.contains(uberModeMatch)) {
foundUber=true;
}
foundHundred=line.contains(progressMatch);
if (foundHundred) break;
}
// NOTE(review): if the progress line never appears, readLine() returns null
// here and the contains() call throws NPE instead of a clean assert failure.
line=r.readLine();
foundComplete=line.contains(completionMatch);
assertTrue(foundUber);
assertTrue(foundHundred);
assertTrue(foundComplete);
System.out.println("The output of job.toString() is : \n" + job.toString());
// The 5-element TaskReport arrays stubbed above must show up in toString().
assertTrue(job.toString().contains("Number of maps: 5\n"));
assertTrue(job.toString().contains("Number of reduces: 5\n"));
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Runs a small local job with a combiner and a combiner-key grouping
// comparator, then checks (a) the combiner actually reduced the record
// count and (b) the reducer output groups by the part before '|'.
@Test public void testCombiner() throws Exception {
  if (!new File(TEST_ROOT_DIR).mkdirs()) {
    throw new RuntimeException("Could not create test dir: " + TEST_ROOT_DIR);
  }
  File in = new File(TEST_ROOT_DIR, "input");
  if (!in.mkdirs()) {
    throw new RuntimeException("Could not create test dir: " + in);
  }
  File out = new File(TEST_ROOT_DIR, "output");
  // Input: key is "<group>|<letter>", value is a number.
  PrintWriter pw = new PrintWriter(new FileWriter(new File(in, "data.txt")));
  pw.println("A|a,1");
  pw.println("A|b,2");
  pw.println("B|a,3");
  pw.println("B|b,4");
  pw.println("B|c,5");
  pw.close();
  JobConf conf = new JobConf();
  conf.set("mapreduce.framework.name", "local");
  Job job = new Job(conf);
  TextInputFormat.setInputPaths(job, new Path(in.getPath()));
  TextOutputFormat.setOutputPath(job, new Path(out.getPath()));
  job.setMapperClass(Map.class);
  job.setReducerClass(Reduce.class);
  job.setInputFormatClass(TextInputFormat.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(LongWritable.class);
  job.setOutputFormatClass(TextOutputFormat.class);
  job.setGroupingComparatorClass(GroupComparator.class);
  job.setCombinerKeyGroupingComparatorClass(GroupComparator.class);
  job.setCombinerClass(Combiner.class);
  // Force the combiner to run even with very few spills.
  job.getConfiguration().setInt("min.num.spills.for.combine", 0);
  job.submit();
  job.waitForCompletion(false);
  if (job.isSuccessful()) {
    Counters counters = job.getCounters();
    long combinerInputRecords = counters.findCounter("org.apache.hadoop.mapreduce.TaskCounter", "COMBINE_INPUT_RECORDS").getValue();
    long combinerOutputRecords = counters.findCounter("org.apache.hadoop.mapreduce.TaskCounter", "COMBINE_OUTPUT_RECORDS").getValue();
    // The combiner must have run and must have shrunk the record stream.
    Assert.assertTrue(combinerInputRecords > 0);
    Assert.assertTrue(combinerInputRecords > combinerOutputRecords);
    // Restored element type: the extracted source used raw Set/HashSet.
    BufferedReader br = new BufferedReader(new FileReader(new File(out, "part-r-00000")));
    Set<String> output = new HashSet<String>();
    // Exactly two output lines expected: one per group.
    String line = br.readLine();
    Assert.assertNotNull(line);
    output.add(line.substring(0, 1) + line.substring(4, 5));
    line = br.readLine();
    Assert.assertNotNull(line);
    output.add(line.substring(0, 1) + line.substring(4, 5));
    line = br.readLine();
    Assert.assertNull(line);
    br.close();
    Set<String> expected = new HashSet<String>();
    expected.add("A2");
    expected.add("B5");
    Assert.assertEquals(expected, output);
  } else {
    Assert.fail("Job failed");
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
@Test public void testPluginAbility(){
try {
JobConf jobConf=new JobConf();
jobConf.setClass(MRConfig.SHUFFLE_CONSUMER_PLUGIN,TestShufflePlugin.TestShuffleConsumerPlugin.class,ShuffleConsumerPlugin.class);
ShuffleConsumerPlugin shuffleConsumerPlugin=null;
Class extends ShuffleConsumerPlugin> clazz=jobConf.getClass(MRConfig.SHUFFLE_CONSUMER_PLUGIN,Shuffle.class,ShuffleConsumerPlugin.class);
assertNotNull("Unable to get " + MRConfig.SHUFFLE_CONSUMER_PLUGIN,clazz);
shuffleConsumerPlugin=ReflectionUtils.newInstance(clazz,jobConf);
assertNotNull("Unable to load " + MRConfig.SHUFFLE_CONSUMER_PLUGIN,shuffleConsumerPlugin);
}
catch ( Exception e) {
assertTrue("Threw exception:" + e,false);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier IgnoredMethod HybridVerifier
/**
 * Tests context.setStatus method.
 * TODO fix testcase
 * @throws IOException
 * @throws InterruptedException
 * @throws ClassNotFoundException
 */
@Test @Ignore public void testContextStatus() throws IOException, InterruptedException, ClassNotFoundException {
Path test=new Path(testRootTempDir,"testContextStatus");
// Phase 1: a map-only job whose mapper sets a custom status string.
int numMaps=1;
Job job=MapReduceTestUtil.createJob(createJobConf(),new Path(test,"in"),new Path(test,"out"),numMaps,0);
job.setMapperClass(MyMapper.class);
job.waitForCompletion(true);
assertTrue("Job failed",job.isSuccessful());
TaskReport[] reports=job.getTaskReports(TaskType.MAP);
assertEquals(numMaps,reports.length);
// The status set by MyMapper (field `myStatus`) must be visible in the report.
assertEquals(myStatus,reports[0].getState());
// Phase 2: a map+reduce job; only success is asserted here.
int numReduces=1;
job=MapReduceTestUtil.createJob(createJobConf(),new Path(test,"in"),new Path(test,"out"),numMaps,numReduces);
job.setMapperClass(DataCopyMapper.class);
job.setReducerClass(DataCopyReducer.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
// Single attempt each so a failure surfaces immediately.
job.setMaxMapAttempts(1);
job.setMaxReduceAttempts(0);
job.waitForCompletion(true);
assertTrue("Job failed",job.isSuccessful());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests new MapReduce reduce task's context.getProgress() method.
 * @throws IOException
 * @throws InterruptedException
 * @throws ClassNotFoundException
 */
@Test public void testReduceContextProgress() throws IOException, InterruptedException, ClassNotFoundException {
int numTasks=1;
Path test=new Path(testRootTempDir,"testReduceContextProgress");
// The checker mapper/reducer assert progress values internally; this test
// only needs the job to finish successfully.
Job job=MapReduceTestUtil.createJob(createJobConf(),new Path(test,"in"),new Path(test,"out"),numTasks,numTasks,INPUT);
job.setMapperClass(ProgressCheckerMapper.class);
job.setReducerClass(ProgressCheckerReducer.class);
job.setMapOutputKeyClass(Text.class);
// Single attempt each so an internal assertion failure fails the job.
job.setMaxMapAttempts(1);
job.setMaxReduceAttempts(1);
job.waitForCompletion(true);
assertTrue("Job failed",job.isSuccessful());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that child queues are converted too during conversion of the parent
 * queue
 */
@Test public void testFromYarnQueue() {
  // Mock a parent queue with a single RUNNING child.
  org.apache.hadoop.yarn.api.records.QueueInfo child = Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  Mockito.when(child.getQueueState()).thenReturn(QueueState.RUNNING);
  org.apache.hadoop.yarn.api.records.QueueInfo queueInfo = Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  List children = new ArrayList();
  children.add(child);
  Mockito.when(queueInfo.getChildQueues()).thenReturn(children);
  Mockito.when(queueInfo.getQueueState()).thenReturn(QueueState.RUNNING);
  org.apache.hadoop.mapreduce.QueueInfo returned = TypeConverter.fromYarn(queueInfo, new Configuration());
  // JUnit convention: expected value first, actual second
  // (the original had the arguments reversed).
  Assert.assertEquals("QueueInfo children weren't properly converted", 1, returned.getQueueChildren().size());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies TypeConverter.fromYarn(ApplicationReport, jobFile): first that a
// report WITHOUT a resource-usage section converts without NPE, then that
// every field of a fully populated report lands in the right JobStatus slot.
@Test public void testFromYarnApplicationReport(){
ApplicationId mockAppId=mock(ApplicationId.class);
when(mockAppId.getClusterTimestamp()).thenReturn(12345L);
when(mockAppId.getId()).thenReturn(6789);
ApplicationReport mockReport=mock(ApplicationReport.class);
when(mockReport.getTrackingUrl()).thenReturn("dummy-tracking-url");
when(mockReport.getApplicationId()).thenReturn(mockAppId);
when(mockReport.getYarnApplicationState()).thenReturn(YarnApplicationState.KILLED);
when(mockReport.getUser()).thenReturn("dummy-user");
when(mockReport.getQueue()).thenReturn("dummy-queue");
String jobFile="dummy-path/job.xml";
// The local `status` is deliberately unused: this try/catch only checks
// that conversion without an ApplicationResourceUsageReport does not NPE.
try {
JobStatus status=TypeConverter.fromYarn(mockReport,jobFile);
}
catch ( NullPointerException npe) {
Assert.fail("Type converstion from YARN fails for jobs without " + "ApplicationUsageReport");
}
// Now attach a full usage report and convert again.
ApplicationResourceUsageReport appUsageRpt=Records.newRecord(ApplicationResourceUsageReport.class);
Resource r=Records.newRecord(Resource.class);
r.setMemory(2048);
appUsageRpt.setNeededResources(r);
appUsageRpt.setNumReservedContainers(1);
appUsageRpt.setNumUsedContainers(3);
appUsageRpt.setReservedResources(r);
appUsageRpt.setUsedResources(r);
when(mockReport.getApplicationResourceUsageReport()).thenReturn(appUsageRpt);
JobStatus status=TypeConverter.fromYarn(mockReport,jobFile);
Assert.assertNotNull("fromYarn returned null status",status);
Assert.assertEquals("jobFile set incorrectly","dummy-path/job.xml",status.getJobFile());
Assert.assertEquals("queue set incorrectly","dummy-queue",status.getQueue());
Assert.assertEquals("trackingUrl set incorrectly","dummy-tracking-url",status.getTrackingUrl());
Assert.assertEquals("user set incorrectly","dummy-user",status.getUsername());
// The tracking URL doubles as the scheduling info in this conversion.
Assert.assertEquals("schedulingInfo set incorrectly","dummy-tracking-url",status.getSchedulingInfo());
Assert.assertEquals("jobId set incorrectly",6789,status.getJobID().getId());
Assert.assertEquals("state set incorrectly",JobStatus.State.KILLED,status.getState());
Assert.assertEquals("needed mem info set incorrectly",2048,status.getNeededMem());
Assert.assertEquals("num rsvd slots info set incorrectly",1,status.getNumReservedSlots());
Assert.assertEquals("num used slots info set incorrectly",3,status.getNumUsedSlots());
Assert.assertEquals("rsvd mem info set incorrectly",2048,status.getReservedMem());
Assert.assertEquals("used mem info set incorrectly",2048,status.getUsedMem());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies the YARN queue state is translated to the lower-cased MapReduce
// queue state string by TypeConverter.fromYarn.
@Test public void testFromYarnQueueInfo() {
  org.apache.hadoop.yarn.api.records.QueueInfo queueInfo = Records.newRecord(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  queueInfo.setQueueState(org.apache.hadoop.yarn.api.records.QueueState.STOPPED);
  org.apache.hadoop.mapreduce.QueueInfo returned = TypeConverter.fromYarn(queueInfo, new Configuration());
  // JUnit convention: expected value first, actual second
  // (the original had the arguments reversed).
  Assert.assertEquals("queueInfo translation didn't work.", queueInfo.getQueueState().toString().toLowerCase(), returned.getState().toString());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies start/finish time and state survive the
// ApplicationReport -> JobStatus conversion when a usage report is present.
@Test public void testFromYarn() throws Exception {
int appStartTime=612354;
int appFinishTime=612355;
YarnApplicationState state=YarnApplicationState.RUNNING;
ApplicationId applicationId=ApplicationId.newInstance(0,0);
ApplicationReport applicationReport=Records.newRecord(ApplicationReport.class);
applicationReport.setApplicationId(applicationId);
applicationReport.setYarnApplicationState(state);
applicationReport.setStartTime(appStartTime);
applicationReport.setFinishTime(appFinishTime);
applicationReport.setUser("TestTypeConverter-user");
// A populated usage report is required for the full conversion path.
ApplicationResourceUsageReport appUsageRpt=Records.newRecord(ApplicationResourceUsageReport.class);
Resource r=Records.newRecord(Resource.class);
r.setMemory(2048);
appUsageRpt.setNeededResources(r);
appUsageRpt.setNumReservedContainers(1);
appUsageRpt.setNumUsedContainers(3);
appUsageRpt.setReservedResources(r);
appUsageRpt.setUsedResources(r);
applicationReport.setApplicationResourceUsageReport(appUsageRpt);
JobStatus jobStatus=TypeConverter.fromYarn(applicationReport,"dummy-jobfile");
Assert.assertEquals(appStartTime,jobStatus.getStartTime());
Assert.assertEquals(appFinishTime,jobStatus.getFinishTime());
Assert.assertEquals(state.toString(),jobStatus.getState().toString());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies start/finish time and state survive the
// JobReport -> JobStatus conversion.
@Test public void testFromYarnJobReport() throws Exception {
int jobStartTime=612354;
int jobFinishTime=612355;
JobState state=JobState.RUNNING;
JobId jobId=Records.newRecord(JobId.class);
JobReport jobReport=Records.newRecord(JobReport.class);
ApplicationId applicationId=ApplicationId.newInstance(0,0);
jobId.setAppId(applicationId);
jobId.setId(0);
jobReport.setJobId(jobId);
jobReport.setJobState(state);
jobReport.setStartTime(jobStartTime);
jobReport.setFinishTime(jobFinishTime);
jobReport.setUser("TestTypeConverter-user");
JobStatus jobStatus=TypeConverter.fromYarn(jobReport,"dummy-jobfile");
Assert.assertEquals(jobStartTime,jobStatus.getStartTime());
Assert.assertEquals(jobFinishTime,jobStatus.getFinishTime());
Assert.assertEquals(state.toString(),jobStatus.getState().toString());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies ClientDistributedCacheManager.determineTimestamps() records a
// FileStatus for every cache file and writes their modification times,
// comma-separated in cache-file order, into the job configuration.
@Test public void testDetermineTimestamps() throws IOException {
  Job job = Job.getInstance(conf);
  job.addCacheFile(firstCacheFile.toUri());
  job.addCacheFile(secondCacheFile.toUri());
  Configuration jobConf = job.getConfiguration();
  // Restored type arguments: the raw "Map"/"HashMap" in the extracted
  // source made the statCache.get(...) -> FileStatus assignments below
  // uncompilable. URI is fully qualified to avoid needing a new import.
  Map<java.net.URI, FileStatus> statCache = new HashMap<java.net.URI, FileStatus>();
  ClientDistributedCacheManager.determineTimestamps(jobConf, statCache);
  FileStatus firstStatus = statCache.get(firstCacheFile.toUri());
  FileStatus secondStatus = statCache.get(secondCacheFile.toUri());
  Assert.assertNotNull(firstStatus);
  Assert.assertNotNull(secondStatus);
  Assert.assertEquals(2, statCache.size());
  String expected = firstStatus.getModificationTime() + "," + secondStatus.getModificationTime();
  Assert.assertEquals(expected, jobConf.get(MRJobConfig.CACHE_FILE_TIMESTAMPS));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Verifies job history is written to the (HDFS) default FS captured at init
// time, not to whatever fs.defaultFS is at event-handling time: history
// files must appear in the mini-cluster and NOT on the local file system.
@Test(timeout=50000) public void testDefaultFsIsUsedForHistory() throws Exception {
Configuration conf=new Configuration();
// Persist the HDFS default FS into core-site so the handler picks it up.
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,dfsCluster.getURI().toString());
FileOutputStream os=new FileOutputStream(coreSitePath);
conf.writeXml(os);
os.close();
// Then flip the in-memory default back to local to prove it is ignored.
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,"file:///");
TestParams t=new TestParams();
conf.set(MRJobConfig.MR_AM_STAGING_DIR,t.dfsWorkDir);
JHEvenHandlerForTest realJheh=new JHEvenHandlerForTest(t.mockAppContext,0,false);
JHEvenHandlerForTest jheh=spy(realJheh);
jheh.init(conf);
try {
jheh.start();
handleEvent(jheh,new JobHistoryEvent(t.jobId,new AMStartedEvent(t.appAttemptId,200,t.containerId,"nmhost",3000,4000)));
handleEvent(jheh,new JobHistoryEvent(t.jobId,new JobFinishedEvent(TypeConverter.fromYarn(t.jobId),0,0,0,0,0,new Counters(),new Counters(),new Counters())));
FileSystem dfsFileSystem=dfsCluster.getFileSystem();
assertTrue("Minicluster contains some history files",dfsFileSystem.globStatus(new Path(t.dfsWorkDir + "/*")).length != 0);
FileSystem localFileSystem=LocalFileSystem.get(conf);
assertFalse("No history directory on non-default file system",localFileSystem.exists(new Path(t.dfsWorkDir)));
}
finally {
jheh.stop();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies the per-user intermediate done dir: relative (no scheme) when no
// core-site default FS is configured, and qualified with the HDFS URI once
// the mini-cluster's default FS has been written to core-site.
@Test public void testGetHistoryIntermediateDoneDirForUser() throws IOException {
Configuration conf=new Configuration();
conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR,"/mapred/history/done_intermediate");
conf.set(MRJobConfig.USER_NAME,System.getProperty("user.name"));
String pathStr=JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
Assert.assertEquals("/mapred/history/done_intermediate/" + System.getProperty("user.name"),pathStr);
// Persist the HDFS default FS into core-site, then flip the in-memory
// value back to local: the core-site value must win.
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,dfsCluster.getURI().toString());
FileOutputStream os=new FileOutputStream(coreSitePath);
conf.writeXml(os);
os.close();
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,"file:///");
pathStr=JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
Assert.assertEquals(dfsCluster.getURI().toString() + "/mapred/history/done_intermediate/" + System.getProperty("user.name"),pathStr);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies DataDrivenDBInputFormat: with a single mapper it emits one
// trivial "1=1" split without touching the database, and the two
// setBoundingQuery/setInput entry points both store the bounding query.
@Test(timeout=1000) public void testDataDrivenDBInputFormat() throws Exception {
JobContext jobContext=mock(JobContext.class);
Configuration configuration=new Configuration();
configuration.setInt(MRJobConfig.NUM_MAPS,1);
when(jobContext.getConfiguration()).thenReturn(configuration);
DataDrivenDBInputFormat format=new DataDrivenDBInputFormat();
List splits=format.getSplits(jobContext);
assertEquals(1,splits.size());
// The single-split shortcut uses the always-true "1=1" bounds.
DataDrivenDBInputSplit split=(DataDrivenDBInputSplit)splits.get(0);
assertEquals("1=1",split.getLowerClause());
assertEquals("1=1",split.getUpperClause());
configuration.setInt(MRJobConfig.NUM_MAPS,2);
DataDrivenDBInputFormat.setBoundingQuery(configuration,"query");
assertEquals("query",configuration.get(DBConfiguration.INPUT_BOUNDING_QUERY));
Job job=mock(Job.class);
when(job.getConfiguration()).thenReturn(configuration);
DataDrivenDBInputFormat.setInput(job,NullDBWritable.class,"query","Bounding Query");
assertEquals("Bounding Query",configuration.get(DBConfiguration.INPUT_BOUNDING_QUERY));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that directories do not get included as part of getSplits()
 */
@Test public void testGetSplitsWithDirectory() throws Exception {
  MiniDFSCluster dfs = null;
  try {
    Configuration conf = new Configuration();
    // Build the mini-cluster exactly once. The original built a second
    // cluster into the same variable, leaking the first one (the finally
    // block could only shut down the last).
    dfs = new MiniDFSCluster.Builder(conf).racks(rack1).hosts(hosts1).build();
    dfs.waitActive();
    FileSystem fileSys = dfs.getFileSystem();
    // Layout: /dir1 holds one empty file and one (empty) sub-directory.
    Path dir1 = new Path("/dir1");
    Path file = new Path("/dir1/file1");
    Path dir2 = new Path("/dir1/dir2");
    if (!fileSys.mkdirs(dir1)) {
      throw new IOException("Mkdirs failed to create " + dir1.toString());
    }
    FSDataOutputStream out = fileSys.create(file);
    out.write(new byte[0]);
    out.close();
    if (!fileSys.mkdirs(dir2)) {
      throw new IOException("Mkdirs failed to create " + dir2.toString());
    }
    DummyInputFormat inFormat = new DummyInputFormat();
    Job job = Job.getInstance(conf);
    FileInputFormat.setInputPaths(job, "/dir1");
    List<InputSplit> splits = inFormat.getSplits(job);
    // Only the file must be returned; the sub-directory is ignored.
    assertEquals(1, splits.size());
    CombineFileSplit fileSplit = (CombineFileSplit) splits.get(0);
    assertEquals(1, fileSplit.getNumPaths());
    assertEquals(file.getName(), fileSplit.getPath(0).getName());
    assertEquals(0, fileSplit.getOffset(0));
    assertEquals(0, fileSplit.getLength(0));
  } finally {
    if (dfs != null) {
      dfs.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test when the input file's length is 0.
 */
@Test public void testForEmptyFile() throws Exception {
Configuration conf=new Configuration();
FileSystem fileSys=FileSystem.get(conf);
Path file=new Path("test" + "/file");
FSDataOutputStream out=fileSys.create(file,true,conf.getInt("io.file.buffer.size",4096),(short)1,(long)BLOCKSIZE);
out.write(new byte[0]);
out.close();
DummyInputFormat inFormat=new DummyInputFormat();
Job job=Job.getInstance(conf);
FileInputFormat.setInputPaths(job,"test");
List splits=inFormat.getSplits(job);
// Even a zero-length file must yield exactly one (empty) split.
assertEquals(1,splits.size());
CombineFileSplit fileSplit=(CombineFileSplit)splits.get(0);
assertEquals(1,fileSplit.getNumPaths());
assertEquals(file.getName(),fileSplit.getPath(0).getName());
// Offset and length are both zero for an empty file.
assertEquals(0,fileSplit.getOffset(0));
assertEquals(0,fileSplit.getLength(0));
// Clean up the scratch directory created for this test.
fileSys.delete(file.getParent(),true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test when input files are from non-default file systems
 */
@Test public void testForNonDefaultFileSystem() throws Throwable {
  Configuration conf = new Configuration();
  // Point the default FS at a dummy URI; the actual input lives on the
  // local file system and is added fully qualified.
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, DUMMY_FS_URI);
  assertEquals(DUMMY_FS_URI, FileSystem.getDefaultUri(conf).toString());
  Path localPath = new Path("testFile1");
  FileSystem lfs = FileSystem.getLocal(conf);
  FSDataOutputStream dos = lfs.create(localPath);
  dos.writeChars("Local file for CFIF");
  dos.close();
  Job job = Job.getInstance(conf);
  FileInputFormat.setInputPaths(job, lfs.makeQualified(localPath));
  DummyInputFormat inFormat = new DummyInputFormat();
  // Restored element type: the raw "List" in the extracted source broke
  // the typed for-each below.
  List<InputSplit> splits = inFormat.getSplits(job);
  assertTrue(splits.size() > 0);
  for (InputSplit s : splits) {
    CombineFileSplit cfs = (CombineFileSplit) s;
    for (Path p : cfs.getPaths()) {
      // Every split path must keep its own (local) scheme, not the dummy
      // default. JUnit convention: expected first (was reversed).
      assertEquals("file", p.toUri().getScheme());
    }
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies CombineSequenceFileInputFormat combines all input files into a
// single split and that reading it back yields every key exactly once.
@Test(timeout=10000) public void testFormat() throws IOException, InterruptedException {
  Job job = Job.getInstance(conf);
  Random random = new Random();
  long seed = random.nextLong();
  random.setSeed(seed);
  localFs.delete(workDir, true);
  FileInputFormat.setInputPaths(job, workDir);
  final int length = 10000;
  final int numFiles = 10;
  createFiles(length, numFiles, random, job);
  TaskAttemptContext context = MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
  // Restored generic type arguments throughout: the extracted source used
  // raw types, which made getCurrentKey()/getCurrentValue() return Object
  // and fail to compile against the typed locals below.
  InputFormat<IntWritable, BytesWritable> format = new CombineSequenceFileInputFormat<IntWritable, BytesWritable>();
  for (int i = 0; i < 3; i++) {
    int numSplits = random.nextInt(length / (SequenceFile.SYNC_INTERVAL / 20)) + 1;
    LOG.info("splitting: requesting = " + numSplits);
    List<InputSplit> splits = format.getSplits(job);
    LOG.info("splitting: got = " + splits.size());
    assertEquals("We got more than one splits!", 1, splits.size());
    InputSplit split = splits.get(0);
    assertEquals("It should be CombineFileSplit", CombineFileSplit.class, split.getClass());
    // Track which keys in [0, length) have been seen; each must appear once.
    BitSet bits = new BitSet(length);
    RecordReader<IntWritable, BytesWritable> reader = format.createRecordReader(split, context);
    MapContext<IntWritable, BytesWritable, IntWritable, BytesWritable> mcontext =
        new MapContextImpl<IntWritable, BytesWritable, IntWritable, BytesWritable>(
            job.getConfiguration(), context.getTaskAttemptID(), reader, null, null,
            MapReduceTestUtil.createDummyReporter(), split);
    reader.initialize(split, mcontext);
    assertEquals("reader class is CombineFileRecordReader.", CombineFileRecordReader.class, reader.getClass());
    try {
      while (reader.nextKeyValue()) {
        IntWritable key = reader.getCurrentKey();
        BytesWritable value = reader.getCurrentValue();
        assertNotNull("Value should not be null.", value);
        final int k = key.get();
        LOG.debug("read " + k);
        assertFalse("Key in multiple partitions.", bits.get(k));
        bits.set(k);
      }
    } finally {
      reader.close();
    }
    assertEquals("Some keys in no partition.", length, bits.cardinality());
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies CombineTextInputFormat combines all input files into a single
// split and that reading it back yields every line value exactly once.
@Test(timeout=10000) public void testFormat() throws Exception {
  Job job = Job.getInstance(new Configuration(defaultConf));
  Random random = new Random();
  long seed = random.nextLong();
  LOG.info("seed = " + seed);
  random.setSeed(seed);
  localFs.delete(workDir, true);
  FileInputFormat.setInputPaths(job, workDir);
  final int length = 10000;
  final int numFiles = 10;
  createFiles(length, numFiles, random);
  CombineTextInputFormat format = new CombineTextInputFormat();
  for (int i = 0; i < 3; i++) {
    int numSplits = random.nextInt(length / 20) + 1;
    LOG.info("splitting: requesting = " + numSplits);
    // Restored generic type arguments: the extracted source used raw
    // types, making getCurrentKey()/getCurrentValue() return Object and
    // fail to compile against the typed locals below.
    List<InputSplit> splits = format.getSplits(job);
    LOG.info("splitting: got = " + splits.size());
    assertEquals("We got more than one splits!", 1, splits.size());
    InputSplit split = splits.get(0);
    assertEquals("It should be CombineFileSplit", CombineFileSplit.class, split.getClass());
    // Track which values in [0, length) have been seen; each must appear once.
    BitSet bits = new BitSet(length);
    LOG.debug("split= " + split);
    TaskAttemptContext context = MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
    RecordReader<LongWritable, Text> reader = format.createRecordReader(split, context);
    assertEquals("reader class is CombineFileRecordReader.", CombineFileRecordReader.class, reader.getClass());
    MapContext<LongWritable, Text, LongWritable, Text> mcontext =
        new MapContextImpl<LongWritable, Text, LongWritable, Text>(
            job.getConfiguration(), context.getTaskAttemptID(), reader, null, null,
            MapReduceTestUtil.createDummyReporter(), split);
    reader.initialize(split, mcontext);
    try {
      int count = 0;
      while (reader.nextKeyValue()) {
        LongWritable key = reader.getCurrentKey();
        assertNotNull("Key should not be null.", key);
        Text value = reader.getCurrentValue();
        final int v = Integer.parseInt(value.toString());
        LOG.debug("read " + v);
        assertFalse("Key in multiple partitions.", bits.get(v));
        bits.set(v);
        count++;
      }
      LOG.debug("split=" + split + " count=" + count);
    } finally {
      reader.close();
    }
    assertEquals("Some keys in no partition.", length, bits.cardinality());
  }
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Test using the gzip codec for reading
 */
@Test(timeout=10000) public void testGzip() throws IOException, InterruptedException {
  Configuration conf = new Configuration(defaultConf);
  CompressionCodec gzip = new GzipCodec();
  ReflectionUtils.setConf(gzip, conf);
  localFs.delete(workDir, true);
  // Two gzipped files (6 + 2 lines) that must land in one combined split.
  writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip, "the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n");
  writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip, "this is a test\nof gzip\n");
  Job job = Job.getInstance(conf);
  FileInputFormat.setInputPaths(job, workDir);
  CombineTextInputFormat format = new CombineTextInputFormat();
  // Restored element types: the raw "List" in the extracted source made
  // splits.get(0) an Object and broke the readSplit(...) call.
  List<InputSplit> splits = format.getSplits(job);
  assertEquals("compressed splits == 1", 1, splits.size());
  List<Text> results = readSplit(format, splits.get(0), job);
  assertEquals("splits[0] length", 8, results.size());
  final String[] firstList = {"the quick", "brown", "fox jumped", "over", " the lazy", " dog"};
  final String[] secondList = {"this is a test", "of gzip"};
  // File order inside the combined split is not deterministic; accept
  // either file appearing first.
  String first = results.get(0).toString();
  if (first.equals(firstList[0])) {
    testResults(results, firstList, secondList);
  } else if (first.equals(secondList[0])) {
    testResults(results, secondList, firstList);
  } else {
    fail("unexpected first token!");
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testNumInputFilesWithoutRecursively() throws Exception {
Configuration conf=getConfiguration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS,numThreads);
Job job=Job.getInstance(conf);
FileInputFormat,?> fileInputFormat=new TextInputFormat();
List splits=fileInputFormat.getSplits(job);
Assert.assertEquals("Input splits are not correct",2,splits.size());
verifySplits(Lists.newArrayList("test:/a1/a2","test:/a1/file1"),splits);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testNumInputFilesRecursively() throws Exception {
Configuration conf=getConfiguration();
conf.set(FileInputFormat.INPUT_DIR_RECURSIVE,"true");
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS,numThreads);
Job job=Job.getInstance(conf);
FileInputFormat,?> fileInputFormat=new TextInputFormat();
List splits=fileInputFormat.getSplits(job);
Assert.assertEquals("Input splits are not correct",3,splits.size());
verifySplits(Lists.newArrayList("test:/a1/a2/file2","test:/a1/a2/file3","test:/a1/file1"),splits);
conf=getConfiguration();
conf.set("mapred.input.dir.recursive","true");
job=Job.getInstance(conf);
splits=fileInputFormat.getSplits(job);
verifySplits(Lists.newArrayList("test:/a1/a2/file2","test:/a1/a2/file3","test:/a1/file1"),splits);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testListLocatedStatus() throws Exception {
Configuration conf=getConfiguration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS,numThreads);
conf.setBoolean("fs.test.impl.disable.cache",false);
conf.set(FileInputFormat.INPUT_DIR,"test:///a1/a2");
MockFileSystem mockFs=(MockFileSystem)new Path("test:///").getFileSystem(conf);
Assert.assertEquals("listLocatedStatus already called",0,mockFs.numListLocatedStatusCalls);
Job job=Job.getInstance(conf);
FileInputFormat,?> fileInputFormat=new TextInputFormat();
List splits=fileInputFormat.getSplits(job);
Assert.assertEquals("Input splits are not correct",2,splits.size());
Assert.assertEquals("listLocatedStatuss calls",1,mockFs.numListLocatedStatusCalls);
FileSystem.closeAll();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify SplitLocationInfo: "localhost" is both on-disk and in-memory,
 * "otherhost" is on-disk only, per the mock filesystem's block locations.
 */
@Test public void testSplitLocationInfo() throws Exception {
  Configuration conf = getConfiguration();
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
      "test:///a1/a2");
  Job job = Job.getInstance(conf);
  TextInputFormat fileInputFormat = new TextInputFormat();
  // Raw List made splits.get(0).getLocations() uncompilable; restored generics.
  List<InputSplit> splits = fileInputFormat.getSplits(job);
  String[] locations = splits.get(0).getLocations();
  Assert.assertEquals(2, locations.length);
  SplitLocationInfo[] locationInfo = splits.get(0).getLocationInfo();
  Assert.assertEquals(2, locationInfo.length);
  // Location order is unspecified, so match each host to its info entry.
  SplitLocationInfo localhostInfo =
      locations[0].equals("localhost") ? locationInfo[0] : locationInfo[1];
  SplitLocationInfo otherhostInfo =
      locations[0].equals("otherhost") ? locationInfo[0] : locationInfo[1];
  Assert.assertTrue(localhostInfo.isOnDisk());
  Assert.assertTrue(localhostInfo.isInMemory());
  Assert.assertTrue(otherhostInfo.isOnDisk());
  Assert.assertFalse(otherhostInfo.isInMemory());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test using the gzip codec with two input files: each compressed file is
 * one split, and fixed-length (5-byte) records are read back in order.
 */
@Test(timeout=5000) public void testGzipWithTwoInputs() throws Exception {
  CompressionCodec gzip = new GzipCodec();
  localFs.delete(workDir, true);
  Job job = Job.getInstance(defaultConf);
  FixedLengthInputFormat format = new FixedLengthInputFormat();
  format.setRecordLength(job.getConfiguration(), 5);
  ReflectionUtils.setConf(gzip, job.getConfiguration());
  FileInputFormat.setInputPaths(job, workDir);
  writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip,
      "one  two  threefour five six  seveneightnine ten  ");
  writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
      "ten  nine eightsevensix  five four threetwo  one  ");
  // Restored stripped generics on the split and result lists.
  List<InputSplit> splits = format.getSplits(job);
  assertEquals("compressed splits == 2", 2, splits.size());
  // Normalize split ordering so part1 is always splits[0].
  FileSplit tmp = (FileSplit) splits.get(0);
  if (tmp.getPath().getName().equals("part2.txt.gz")) {
    splits.set(0, splits.get(1));
    splits.set(1, tmp);
  }
  List<String> results = readSplit(format, splits.get(0), job);
  assertEquals("splits[0] length", 10, results.size());
  assertEquals("splits[0][5]", "six  ", results.get(5));
  results = readSplit(format, splits.get(1), job);
  assertEquals("splits[1] length", 10, results.size());
  assertEquals("splits[1][0]", "ten  ", results.get(0));
  assertEquals("splits[1][1]", "nine ", results.get(1));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * A 1 GiB file with a 128 MiB split size lands exactly on a split boundary:
 * expect 8 splits, each located on its own "hostN".
 */
@Test @SuppressWarnings({"rawtypes","unchecked"}) public void testLastInputSplitAtSplitBoundary() throws Exception {
  FileInputFormat fif = new FileInputFormatForTest(1024l * 1024 * 1024, 128l * 1024 * 1024);
  Configuration conf = new Configuration();
  JobContext jobContext = mock(JobContext.class);
  when(jobContext.getConfiguration()).thenReturn(conf);
  // Raw List made InputSplit split=splits.get(i) uncompilable; restored generics.
  List<InputSplit> splits = fif.getSplits(jobContext);
  assertEquals(8, splits.size());
  for (int i = 0; i < splits.size(); i++) {
    InputSplit split = splits.get(i);
    assertEquals(("host" + i), split.getLocations()[0]);
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * A 1027 MiB file with a 128 MiB split size leaves a small remainder that is
 * folded into the last split: still 8 splits, each on its own "hostN".
 */
@Test @SuppressWarnings({"rawtypes","unchecked"}) public void testLastInputSplitExceedingSplitBoundary() throws Exception {
  FileInputFormat fif = new FileInputFormatForTest(1027l * 1024 * 1024, 128l * 1024 * 1024);
  Configuration conf = new Configuration();
  JobContext jobContext = mock(JobContext.class);
  when(jobContext.getConfiguration()).thenReturn(conf);
  // Raw List made InputSplit split=splits.get(i) uncompilable; restored generics.
  List<InputSplit> splits = fif.getSplits(jobContext);
  assertEquals(8, splits.size());
  for (int i = 0; i < splits.size(); i++) {
    InputSplit split = splits.get(i);
    assertEquals(("host" + i), split.getLocations()[0]);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test when the input file's length is 0: the format must still produce
 * exactly one split, with no locations, zero start, and zero length.
 */
@Test public void testForEmptyFile() throws Exception {
  Configuration conf = new Configuration();
  FileSystem fileSys = FileSystem.get(conf);
  Path file = new Path("test" + "/file");
  FSDataOutputStream out = fileSys.create(file, true,
      conf.getInt("io.file.buffer.size", 4096), (short) 1, (long) 1024);
  out.write(new byte[0]);
  out.close();
  DummyInputFormat inFormat = new DummyInputFormat();
  Job job = Job.getInstance(conf);
  FileInputFormat.setInputPaths(job, "test");
  // Restored stripped generics on the split list.
  List<InputSplit> splits = inFormat.getSplits(job);
  assertEquals(1, splits.size());
  FileSplit fileSplit = (FileSplit) splits.get(0);
  assertEquals(0, fileSplit.getLocations().length);
  assertEquals(file.getName(), fileSplit.getPath().getName());
  assertEquals(0, fileSplit.getStart());
  assertEquals(0, fileSplit.getLength());
  // Clean up the whole "test" directory, not just the file.
  fileSys.delete(file.getParent(), true);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * addInputPath/setInputPaths must keep the paths exactly as given, even when
 * the default filesystem is a credentialed s3 URI.
 */
@Test public void testAddInputPath() throws IOException {
  final Configuration conf = new Configuration();
  conf.set("fs.defaultFS", "s3://abc:xyz@hostname/");
  final Job j = Job.getInstance(conf);
  j.getConfiguration().set("fs.defaultFS", "s3://abc:xyz@hostname/");
  final FileSystem defaultfs = FileSystem.get(conf);
  System.out.println("defaultfs.getUri() = " + defaultfs.getUri());
  // Case 1: append a local path via addInputPath.
  final Path added = new Path("file:/foo");
  System.out.println("original = " + added);
  FileInputFormat.addInputPath(j, added);
  Path[] paths = FileInputFormat.getInputPaths(j);
  System.out.println("results = " + Arrays.asList(paths));
  assertEquals(1, paths.length);
  assertEquals(added, paths[0]);
  // Case 2: replace the input paths via setInputPaths.
  final Path replacement = new Path("file:/bar");
  System.out.println("original = " + replacement);
  FileInputFormat.setInputPaths(j, replacement);
  paths = FileInputFormat.getInputPaths(j);
  System.out.println("results = " + Arrays.asList(paths));
  assertEquals(1, paths.length);
  assertEquals(replacement, paths[0]);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * A 100 MiB file below the 128 MiB split size yields exactly one split,
 * located on "host0".
 */
@Test @SuppressWarnings({"rawtypes","unchecked"}) public void testLastInputSplitSingleSplit() throws Exception {
  FileInputFormat fif = new FileInputFormatForTest(100l * 1024 * 1024, 128l * 1024 * 1024);
  Configuration conf = new Configuration();
  JobContext jobContext = mock(JobContext.class);
  when(jobContext.getConfiguration()).thenReturn(conf);
  // Raw List made InputSplit split=splits.get(i) uncompilable; restored generics.
  List<InputSplit> splits = fif.getSplits(jobContext);
  assertEquals(1, splits.size());
  for (int i = 0; i < splits.size(); i++) {
    InputSplit split = splits.get(i);
    assertEquals(("host" + i), split.getLocations()[0]);
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify that a splittable codec (bzip2) works with KeyValueTextInputFormat:
 * every record of the form "2i\ti" must land in exactly one split, and the
 * union of all splits must cover every record.
 */
@Test public void testSplitableCodecs() throws Exception {
  final Job job = Job.getInstance(defaultConf);
  final Configuration conf = job.getConfiguration();
  // Load the codec reflectively so the test fails cleanly if bzip2 is absent.
  CompressionCodec codec = null;
  try {
    codec = (CompressionCodec) ReflectionUtils.newInstance(
        conf.getClassByName("org.apache.hadoop.io.compress.BZip2Codec"), conf);
  } catch (ClassNotFoundException cnfe) {
    throw new IOException("Illegal codec!");
  }
  Path file = new Path(workDir, "test" + codec.getDefaultExtension());
  // Log the seed so failing runs can be reproduced.
  int seed = new Random().nextInt();
  LOG.info("seed = " + seed);
  Random random = new Random(seed);
  localFs.delete(workDir, true);
  FileInputFormat.setInputPaths(job, workDir);
  final int MAX_LENGTH = 500000;
  FileInputFormat.setMaxInputSplitSize(job, MAX_LENGTH / 20);
  for (int length = 0; length < MAX_LENGTH;
       length += random.nextInt(MAX_LENGTH / 4) + 1) {
    LOG.info("creating; entries = " + length);
    // Write 'length' records "2i\ti" through the compression codec.
    Writer writer = new OutputStreamWriter(
        codec.createOutputStream(localFs.create(file)));
    try {
      for (int i = 0; i < length; i++) {
        writer.write(Integer.toString(i * 2));
        writer.write("\t");
        writer.write(Integer.toString(i));
        writer.write("\n");
      }
    } finally {
      writer.close();
    }
    KeyValueTextInputFormat format = new KeyValueTextInputFormat();
    assertTrue("KVTIF claims not splittable", format.isSplitable(job, file));
    for (int i = 0; i < 3; i++) {
      int numSplits = random.nextInt(MAX_LENGTH / 2000) + 1;
      LOG.info("splitting: requesting = " + numSplits);
      // Restored stripped generics throughout this reader pipeline.
      List<InputSplit> splits = format.getSplits(job);
      LOG.info("splitting: got = " + splits.size());
      // Each record index must be seen exactly once across all splits.
      BitSet bits = new BitSet(length);
      for (int j = 0; j < splits.size(); j++) {
        LOG.debug("split[" + j + "]= " + splits.get(j));
        TaskAttemptContext context =
            MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
        RecordReader<Text, Text> reader =
            format.createRecordReader(splits.get(j), context);
        // Dropped an unused (and syntactically garbled) "Class> clazz" local.
        MapContext<Text, Text, Text, Text> mcontext =
            new MapContextImpl<Text, Text, Text, Text>(job.getConfiguration(),
                context.getTaskAttemptID(), reader, null, null,
                MapReduceTestUtil.createDummyReporter(), splits.get(j));
        reader.initialize(splits.get(j), mcontext);
        Text key = null;
        Text value = null;
        try {
          int count = 0;
          while (reader.nextKeyValue()) {
            key = reader.getCurrentKey();
            value = reader.getCurrentValue();
            final int k = Integer.parseInt(key.toString());
            final int v = Integer.parseInt(value.toString());
            assertEquals("Bad key", 0, k % 2);
            assertEquals("Mismatched key/value", k / 2, v);
            LOG.debug("read " + k + "," + v);
            assertFalse(k + "," + v + " in multiple partitions.", bits.get(v));
            bits.set(v);
            count++;
          }
          if (count > 0) {
            LOG.info("splits[" + j + "]=" + splits.get(j) + " count=" + count);
          } else {
            LOG.debug("splits[" + j + "]=" + splits.get(j) + " count=" + count);
          }
        } finally {
          reader.close();
        }
      }
      assertEquals("Some keys in no partition.", length, bits.cardinality());
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test using the gzip codec for reading with KeyValueTextInputFormat:
 * each gzipped file is one unsplittable split; values after the tab
 * separator are read back in order.
 */
@Test public void testGzip() throws IOException, InterruptedException {
  Configuration conf = new Configuration(defaultConf);
  CompressionCodec gzip = new GzipCodec();
  ReflectionUtils.setConf(gzip, conf);
  localFs.delete(workDir, true);
  writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip,
      "line-1\tthe quick\nline-2\tbrown\nline-3\t"
          + "fox jumped\nline-4\tover\nline-5\t the lazy\nline-6\t dog\n");
  writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
      "line-1\tthis is a test\nline-1\tof gzip\n");
  Job job = Job.getInstance(conf);
  FileInputFormat.setInputPaths(job, workDir);
  KeyValueTextInputFormat format = new KeyValueTextInputFormat();
  // Restored stripped generics on the split and result lists.
  List<InputSplit> splits = format.getSplits(job);
  assertEquals("compressed splits == 2", 2, splits.size());
  // Normalize split ordering so part1 is always splits[0].
  FileSplit tmp = (FileSplit) splits.get(0);
  if (tmp.getPath().getName().equals("part2.txt.gz")) {
    splits.set(0, splits.get(1));
    splits.set(1, tmp);
  }
  List<Text> results = readSplit(format, splits.get(0), job);
  assertEquals("splits[0] length", 6, results.size());
  assertEquals("splits[0][0]", "the quick", results.get(0).toString());
  assertEquals("splits[0][1]", "brown", results.get(1).toString());
  assertEquals("splits[0][2]", "fox jumped", results.get(2).toString());
  assertEquals("splits[0][3]", "over", results.get(3).toString());
  assertEquals("splits[0][4]", " the lazy", results.get(4).toString());
  assertEquals("splits[0][5]", " dog", results.get(5).toString());
  results = readSplit(format, splits.get(1), job);
  assertEquals("splits[1] length", 2, results.size());
  assertEquals("splits[1][0]", "this is a test", results.get(0).toString());
  assertEquals("splits[1][1]", "of gzip", results.get(1).toString());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end KeyValueTextInputFormat contract: records "2i\ti" written to a
 * plain file are read back with Text keys/values, every record appearing in
 * exactly one split, across several random split configurations.
 */
@Test public void testFormat() throws Exception {
  Job job = Job.getInstance(new Configuration(defaultConf));
  Path file = new Path(workDir, "test.txt");
  // Log the seed so failing runs can be reproduced.
  int seed = new Random().nextInt();
  LOG.info("seed = " + seed);
  Random random = new Random(seed);
  localFs.delete(workDir, true);
  FileInputFormat.setInputPaths(job, workDir);
  final int MAX_LENGTH = 10000;
  for (int length = 0; length < MAX_LENGTH;
       length += random.nextInt(MAX_LENGTH / 10) + 1) {
    LOG.debug("creating; entries = " + length);
    // Write 'length' records of the form "2i\ti".
    Writer writer = new OutputStreamWriter(localFs.create(file));
    try {
      for (int i = 0; i < length; i++) {
        writer.write(Integer.toString(i * 2));
        writer.write("\t");
        writer.write(Integer.toString(i));
        writer.write("\n");
      }
    } finally {
      writer.close();
    }
    KeyValueTextInputFormat format = new KeyValueTextInputFormat();
    for (int i = 0; i < 3; i++) {
      int numSplits = random.nextInt(MAX_LENGTH / 20) + 1;
      LOG.debug("splitting: requesting = " + numSplits);
      // Restored stripped generics throughout this reader pipeline.
      List<InputSplit> splits = format.getSplits(job);
      LOG.debug("splitting: got = " + splits.size());
      // Each record index must be seen exactly once across all splits.
      BitSet bits = new BitSet(length);
      for (int j = 0; j < splits.size(); j++) {
        LOG.debug("split[" + j + "]= " + splits.get(j));
        TaskAttemptContext context =
            MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
        RecordReader<Text, Text> reader =
            format.createRecordReader(splits.get(j), context);
        Class<?> clazz = reader.getClass();
        assertEquals("reader class is KeyValueLineRecordReader.",
            KeyValueLineRecordReader.class, clazz);
        MapContext<Text, Text, Text, Text> mcontext =
            new MapContextImpl<Text, Text, Text, Text>(job.getConfiguration(),
                context.getTaskAttemptID(), reader, null, null,
                MapReduceTestUtil.createDummyReporter(), splits.get(j));
        reader.initialize(splits.get(j), mcontext);
        Text key = null;
        Text value = null;
        try {
          int count = 0;
          while (reader.nextKeyValue()) {
            key = reader.getCurrentKey();
            clazz = key.getClass();
            assertEquals("Key class is Text.", Text.class, clazz);
            value = reader.getCurrentValue();
            clazz = value.getClass();
            assertEquals("Value class is Text.", Text.class, clazz);
            final int k = Integer.parseInt(key.toString());
            final int v = Integer.parseInt(value.toString());
            assertEquals("Bad key", 0, k % 2);
            assertEquals("Mismatched key/value", k / 2, v);
            LOG.debug("read " + v);
            assertFalse("Key in multiple partitions.", bits.get(v));
            bits.set(v);
            count++;
          }
          LOG.debug("splits[" + j + "]=" + splits.get(j) + " count=" + count);
        } finally {
          reader.close();
        }
      }
      assertEquals("Some keys in no partition.", length, bits.cardinality());
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Runs a job with two inputs bound to different InputFormats/Mappers via
 * MultipleInputs; each key appears once per input, so the reducer must emit
 * "&lt;key&gt; 2" for a..e.
 */
@Test public void testDoMultipleInputs() throws IOException {
  Path in1Dir = getDir(IN1_DIR);
  Path in2Dir = getDir(IN2_DIR);
  Path outDir = getDir(OUT_DIR);
  Configuration conf = createJobConf();
  FileSystem fs = FileSystem.get(conf);
  fs.delete(outDir, true);
  // Plain-line input for the TextInputFormat branch.
  DataOutputStream file1 = fs.create(new Path(in1Dir, "part-0"));
  file1.writeBytes("a\nb\nc\nd\ne");
  file1.close();
  // Tab-separated key/value input for the KeyValueTextInputFormat branch.
  DataOutputStream file2 = fs.create(new Path(in2Dir, "part-0"));
  file2.writeBytes("a\tblah\nb\tblah\nc\tblah\nd\tblah\ne\tblah");
  file2.close();
  Job job = Job.getInstance(conf);
  job.setJobName("mi");
  MultipleInputs.addInputPath(job, in1Dir, TextInputFormat.class, MapClass.class);
  MultipleInputs.addInputPath(job, in2Dir, KeyValueTextInputFormat.class,
      KeyValueMapClass.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(Text.class);
  job.setReducerClass(ReducerClass.class);
  FileOutputFormat.setOutputPath(job, outDir);
  boolean success = false;
  try {
    success = job.waitForCompletion(true);
  } catch (InterruptedException ie) {
    throw new RuntimeException(ie);
  } catch (ClassNotFoundException cnfe) {
    throw new RuntimeException(cnfe);
  }
  if (!success) throw new RuntimeException("Job failed!");
  BufferedReader output = new BufferedReader(
      new InputStreamReader(fs.open(new Path(outDir, "part-r-00000"))));
  // try/finally so the reader is closed even on assertion failure; assertEquals
  // (rather than assertTrue(readLine().equals(...))) avoids an NPE when the
  // output is short and reports the mismatching line on failure.
  try {
    assertEquals("a 2", output.readLine());
    assertEquals("b 2", output.readLine());
    assertEquals("c 2", output.readLine());
    assertEquals("d 2", output.readLine());
    assertEquals("e 2", output.readLine());
  } finally {
    output.close();
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Drives a dependency chain of controlled jobs and checks that a job which
 * reaches RUNNING state has been assigned a mapred job id.
 */
@Test(timeout=30000) public void testControlledJob() throws Exception {
LOG.info("Starting testControlledJob");
Configuration conf=createJobConf();
cleanupData(conf);
Job job1=MapReduceTestUtil.createCopyJob(conf,outdir_1,indir);
// NOTE(review): cjob1 is not declared here — presumably a field populated by
// createDependencies(); confirm against the enclosing class.
JobControl theControl=createDependencies(conf,job1);
// Poll until the controlled job actually transitions to RUNNING; an
// interrupt aborts the wait rather than failing the test outright.
while (cjob1.getJobState() != ControlledJob.State.RUNNING) {
try {
Thread.sleep(100);
}
catch ( InterruptedException e) {
break;
}
}
// A RUNNING controlled job must expose its underlying mapred job id.
Assert.assertNotNull(cjob1.getMapredJobId());
waitTillAllFinished(theControl);
assertEquals("Some jobs failed",0,theControl.getFailedJobList().size());
theControl.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Four controlled jobs where job3 depends on job1+job2 and job4 on job3:
 * all must finish in SUCCESS with an empty failed list.
 */
@Test public void testSuccessfulJobs() throws Exception {
  JobControl jobControl = new JobControl("Test");
  ControlledJob job1 = createSuccessfulControlledJob(jobControl);
  ControlledJob job2 = createSuccessfulControlledJob(jobControl);
  ControlledJob job3 = createSuccessfulControlledJob(jobControl, job1, job2);
  ControlledJob job4 = createSuccessfulControlledJob(jobControl, job3);
  runJobControl(jobControl);
  assertEquals("Success list", 4, jobControl.getSuccessfulJobList().size());
  assertEquals("Failed list", 0, jobControl.getFailedJobList().size());
  // assertEquals reports expected vs. actual state on failure, unlike
  // assertTrue(state == ...) which only says "false".
  assertEquals(ControlledJob.State.SUCCESS, job1.getJobState());
  assertEquals(ControlledJob.State.SUCCESS, job2.getJobState());
  assertEquals(ControlledJob.State.SUCCESS, job3.getJobState());
  assertEquals(ControlledJob.State.SUCCESS, job4.getJobState());
  jobControl.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * When job1 fails, its dependents (job3, and transitively job4) must end up
 * DEPENDENT_FAILED while the independent job2 still succeeds.
 */
@Test public void testFailedJob() throws Exception {
  JobControl jobControl = new JobControl("Test");
  ControlledJob job1 = createFailedControlledJob(jobControl);
  ControlledJob job2 = createSuccessfulControlledJob(jobControl);
  ControlledJob job3 = createSuccessfulControlledJob(jobControl, job1, job2);
  ControlledJob job4 = createSuccessfulControlledJob(jobControl, job3);
  runJobControl(jobControl);
  assertEquals("Success list", 1, jobControl.getSuccessfulJobList().size());
  assertEquals("Failed list", 3, jobControl.getFailedJobList().size());
  // assertEquals reports expected vs. actual state on failure, unlike
  // assertTrue(state == ...) which only says "false".
  assertEquals(ControlledJob.State.FAILED, job1.getJobState());
  assertEquals(ControlledJob.State.SUCCESS, job2.getJobState());
  assertEquals(ControlledJob.State.DEPENDENT_FAILED, job3.getJobState());
  assertEquals(ControlledJob.State.DEPENDENT_FAILED, job4.getJobState());
  jobControl.stop();
}
APIUtilityVerifier IterativeVerifier EqualityVerifier
/**
 * Verify IntervalSampler contract, that samples are taken at regular
 * intervals from the given splits.
 */
@Test @SuppressWarnings("unchecked") public void testIntervalSampler() throws Exception {
  final int TOT_SPLITS = 16;
  final int PER_SPLIT_SAMPLE = 4;
  final int NUM_SAMPLES = TOT_SPLITS * PER_SPLIT_SAMPLE;
  final double FREQ = 1.0 / TOT_SPLITS;
  InputSampler.Sampler sampler = new InputSampler.IntervalSampler(FREQ, NUM_SAMPLES);
  // Seed each split with its own index so sampled values are predictable.
  int[] inits = new int[TOT_SPLITS];
  for (int split = 0; split < TOT_SPLITS; ++split) {
    inits[split] = split;
  }
  Job ignored = Job.getInstance();
  Object[] samples = sampler.getSample(
      new TestInputSamplerIF(NUM_SAMPLES, TOT_SPLITS, inits), ignored);
  assertEquals(NUM_SAMPLES, samples.length);
  Arrays.sort(samples, new IntWritable.Comparator());
  // Once sorted, the samples must be exactly 0..NUM_SAMPLES-1.
  int expected = 0;
  for (Object sample : samples) {
    assertEquals(expected++, ((IntWritable) sample).get());
  }
}
APIUtilityVerifier IterativeVerifier EqualityVerifier
/**
 * Verify IntervalSampler in mapred.lib.InputSampler, which is added back
 * for binary compatibility of M/R 1.x
 */
@Test(timeout=30000) @SuppressWarnings("unchecked") public void testMapredIntervalSampler() throws Exception {
  final int TOT_SPLITS = 16;
  final int PER_SPLIT_SAMPLE = 4;
  final int NUM_SAMPLES = TOT_SPLITS * PER_SPLIT_SAMPLE;
  final double FREQ = 1.0 / TOT_SPLITS;
  org.apache.hadoop.mapred.lib.InputSampler.Sampler sampler =
      new org.apache.hadoop.mapred.lib.InputSampler.IntervalSampler(FREQ, NUM_SAMPLES);
  // Seed each split with its own index so sampled values are predictable.
  int[] inits = new int[TOT_SPLITS];
  for (int split = 0; split < TOT_SPLITS; ++split) {
    inits[split] = split;
  }
  Job ignored = Job.getInstance();
  Object[] samples = sampler.getSample(
      new TestInputSamplerIF(NUM_SAMPLES, TOT_SPLITS, inits), ignored);
  assertEquals(NUM_SAMPLES, samples.length);
  Arrays.sort(samples, new IntWritable.Comparator());
  // Once sorted, the samples must be exactly 0..NUM_SAMPLES-1.
  int expected = 0;
  for (Object sample : samples) {
    assertEquals(expected++, ((IntWritable) sample).get());
  }
}
APIUtilityVerifier IterativeVerifier EqualityVerifier
/**
 * Verify SplitSampler contract, that an equal number of records are taken
 * from the first splits.
 */
@Test @SuppressWarnings("unchecked") public void testSplitSampler() throws Exception {
  final int TOT_SPLITS = 15;
  final int NUM_SPLITS = 5;
  final int STEP_SAMPLE = 5;
  final int NUM_SAMPLES = NUM_SPLITS * STEP_SAMPLE;
  InputSampler.Sampler sampler = new InputSampler.SplitSampler(NUM_SAMPLES, NUM_SPLITS);
  // Stagger each split's starting value by STEP_SAMPLE so the sampled set,
  // once sorted, is a dense 0..NUM_SAMPLES-1 range.
  int[] inits = new int[TOT_SPLITS];
  for (int split = 0; split < TOT_SPLITS; ++split) {
    inits[split] = split * STEP_SAMPLE;
  }
  Job ignored = Job.getInstance();
  Object[] samples = sampler.getSample(
      new TestInputSamplerIF(100000, TOT_SPLITS, inits), ignored);
  assertEquals(NUM_SAMPLES, samples.length);
  Arrays.sort(samples, new IntWritable.Comparator());
  int expected = 0;
  for (Object sample : samples) {
    assertEquals(expected++, ((IntWritable) sample).get());
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * run a distributed job and verify that TokenCache is available
 * @throws IOException
 */
@Test public void testBinaryTokenFile() throws IOException {
Configuration conf=mrCluster.getConfig();
// Point the job at the MiniDFS namenode (listed twice to exercise
// duplicate-namenode handling in token acquisition).
final String nnUri=dfsCluster.getURI(0).toString();
conf.set(MRJobConfig.JOB_NAMENODES,nnUri + "," + nnUri);
// 1 map, 1 reduce, 1ms map/reduce sleep times.
final String[] args={"-m","1","-r","1","-mt","1","-rt","1"};
int res=-1;
try {
res=ToolRunner.run(conf,new MySleepJob(),args);
}
 catch ( Exception e) {
// Broad catch is intentional at this test boundary: any failure mode
// of the distributed job should fail the test with its stack trace.
System.out.println("Job failed with " + e.getLocalizedMessage());
e.printStackTrace(System.out);
fail("Job failed");
}
assertEquals("dist job res is not 0:",0,res);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * run a distributed job and verify that TokenCache is available
 * @throws IOException
 */
@Test public void test() throws IOException {
  Configuration jobConf = new JobConf(mrCluster.getConfig());
  NameNode nn = dfsCluster.getNameNode();
  URI nnUri = NameNode.getUri(nn.getNameNodeAddress());
  // Namenode listed twice to exercise duplicate-namenode handling.
  jobConf.set(JobContext.JOB_NAMENODES, nnUri + "," + nnUri.toString());
  jobConf.set("mapreduce.job.credentials.json", "keys.json");
  // 1 map, 1 reduce, 1ms map/reduce sleep times.
  String[] args = {"-m", "1", "-r", "1", "-mt", "1", "-rt", "1"};
  int res = -1;
  try {
    res = ToolRunner.run(jobConf, new CredentialsTestJob(), args);
  } catch (Exception e) {
    // Fixed missing space after "with" in the diagnostic message.
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  // Fixed swapped arguments: JUnit expects (message, expected, actual).
  assertEquals("dist job res is not 0", 0, res);
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * A delegation token already present in the binary credentials file must be
 * found by TokenCache.obtainTokensForNamenodesInternal without re-fetching.
 */
@SuppressWarnings("deprecation") @Test public void testGetTokensForNamenodes() throws IOException, URISyntaxException {
  Path TEST_ROOT_DIR =
      new Path(System.getProperty("test.build.data", "test/build/data"));
  String binaryTokenFile = FileSystem.getLocal(conf)
      .makeQualified(new Path(TEST_ROOT_DIR, "tokenFile")).toUri().getPath();
  MockFileSystem fs1 = createFileSystemForServiceName("service1");
  Credentials creds = new Credentials();
  // Restored garbled wildcard generics: "Token>" -> Token<?>.
  Token<?> token1 = fs1.getDelegationToken(renewer);
  creds.addToken(token1.getService(), token1);
  // Wire the credentials through the binary token file mechanism.
  conf.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, binaryTokenFile);
  creds.writeTokenStorageFile(new Path(binaryTokenFile), conf);
  TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf);
  String fs_addr = fs1.getCanonicalServiceName();
  Token<?> nnt = TokenCache.getDelegationToken(creds, fs_addr);
  assertNotNull("Token for nn is null", nnt);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Writing an old-API (mapred) split with five block locations while the
 * configured cap is four must persist only four locations.
 */
@Test public void testMaxBlockLocationsOldSplits() throws Exception {
  TEST_DIR.mkdirs();
  try {
    Configuration conf = new Configuration();
    conf.setInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY, 4);
    Path submitDir = new Path(TEST_DIR.getAbsolutePath());
    FileSystem fs = FileSystem.getLocal(conf);
    // Five locations — one more than the configured cap.
    String[] locations = {"loc1", "loc2", "loc3", "loc4", "loc5"};
    org.apache.hadoop.mapred.FileSplit split =
        new org.apache.hadoop.mapred.FileSplit(new Path("/some/path"), 0, 1, locations);
    JobSplitWriter.createSplitFiles(submitDir, conf, fs,
        new org.apache.hadoop.mapred.InputSplit[]{split});
    JobSplit.TaskSplitMetaInfo[] infos =
        SplitMetaInfoReader.readSplitMetaInfo(new JobID(), fs, conf, submitDir);
    assertEquals("unexpected number of splits", 1, infos.length);
    assertEquals("unexpected number of split locations", 4,
        infos[0].getLocations().length);
  } finally {
    FileUtil.fullyDelete(TEST_DIR);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Writing a new-API (mapreduce) split with five block locations while the
 * configured cap is four must persist only four locations.
 */
@Test public void testMaxBlockLocationsNewSplits() throws Exception {
  TEST_DIR.mkdirs();
  try {
    Configuration conf = new Configuration();
    conf.setInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY, 4);
    Path submitDir = new Path(TEST_DIR.getAbsolutePath());
    FileSystem fs = FileSystem.getLocal(conf);
    // Five locations — one more than the configured cap.
    String[] locations = {"loc1", "loc2", "loc3", "loc4", "loc5"};
    FileSplit split = new FileSplit(new Path("/some/path"), 0, 1, locations);
    JobSplitWriter.createSplitFiles(submitDir, conf, fs, new FileSplit[]{split});
    JobSplit.TaskSplitMetaInfo[] infos =
        SplitMetaInfoReader.readSplitMetaInfo(new JobID(), fs, conf, submitDir);
    assertEquals("unexpected number of splits", 1, infos.length);
    assertEquals("unexpected number of split locations", 4,
        infos[0].getLocations().length);
  } finally {
    FileUtil.fullyDelete(TEST_DIR);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Shutting down a fetcher mid-transfer to an on-disk map output must close
 * the stream, delete the temp file, and abort the OnDiskMapOutput.
 */
@Test(timeout=10000) public void testInterruptOnDisk() throws Exception {
final int FETCHER=7;
Path p=new Path("file:///tmp/foo");
Path pTmp=OnDiskMapOutput.getTempPath(p,FETCHER);
FileSystem mFs=mock(FileSystem.class,RETURNS_DEEP_STUBS);
MapOutputFile mof=mock(MapOutputFile.class);
when(mof.getInputFileForWrite(any(TaskID.class),anyLong())).thenReturn(p);
// Spy so abort() can be verified after shutdown.
OnDiskMapOutput odmo=spy(new OnDiskMapOutput(map1ID,id,mm,100L,job,mof,FETCHER,true,mFs,p));
when(mm.reserve(any(TaskAttemptID.class),anyLong(),anyInt())).thenReturn(odmo);
doNothing().when(mm).waitForResource();
when(ss.getHost()).thenReturn(host);
// Stub a valid shuffle handshake (hash, 200, headers) so the fetch proceeds.
String replyHash=SecureShuffleUtils.generateHash(encHash.getBytes(),key);
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH)).thenReturn(replyHash);
ShuffleHeader header=new ShuffleHeader(map1ID.toString(),10,10,1);
ByteArrayOutputStream bout=new ByteArrayOutputStream();
header.write(new DataOutputStream(bout));
// StuckInputStream blocks after the header so the fetcher is mid-read
// when we shut it down.
final StuckInputStream in=new StuckInputStream(new ByteArrayInputStream(bout.toByteArray()));
when(connection.getInputStream()).thenReturn(in);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME)).thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION)).thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
// disconnect() releases the stuck stream, letting the fetcher thread exit.
doAnswer(new Answer(){
public Void answer( InvocationOnMock ignore) throws IOException {
in.close();
return null;
}
}
).when(connection).disconnect();
Fetcher underTest=new FakeFetcher(job,id,ss,mm,r,metrics,except,key,connection,FETCHER);
underTest.start();
in.waitForFetcher();
underTest.shutDown();
underTest.join();
// The interrupted fetch must clean up: stream closed, temp file removed,
// and the on-disk output aborted.
assertTrue(in.wasClosedProperly());
verify(mFs).create(eq(pTmp));
verify(mFs).delete(eq(pTmp),eq(false));
verify(odmo).abort();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Shutting down a fetcher mid-transfer to an in-memory map output must close
 * the stream and abort the InMemoryMapOutput.
 */
@Test(timeout=10000) public void testInterruptInMemory() throws Exception {
final int FETCHER=2;
// Spy so abort() can be verified after shutdown.
InMemoryMapOutput immo=spy(new InMemoryMapOutput(job,id,mm,100,null,true));
when(mm.reserve(any(TaskAttemptID.class),anyLong(),anyInt())).thenReturn(immo);
doNothing().when(mm).waitForResource();
when(ss.getHost()).thenReturn(host);
// Stub a valid shuffle handshake (hash, 200, headers) so the fetch proceeds.
String replyHash=SecureShuffleUtils.generateHash(encHash.getBytes(),key);
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME)).thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION)).thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH)).thenReturn(replyHash);
ShuffleHeader header=new ShuffleHeader(map1ID.toString(),10,10,1);
ByteArrayOutputStream bout=new ByteArrayOutputStream();
header.write(new DataOutputStream(bout));
// StuckInputStream blocks after the header so the fetcher is mid-read
// when we shut it down.
final StuckInputStream in=new StuckInputStream(new ByteArrayInputStream(bout.toByteArray()));
when(connection.getInputStream()).thenReturn(in);
// disconnect() releases the stuck stream, letting the fetcher thread exit.
doAnswer(new Answer(){
public Void answer( InvocationOnMock ignore) throws IOException {
in.close();
return null;
}
}
).when(connection).disconnect();
Fetcher underTest=new FakeFetcher(job,id,ss,mm,r,metrics,except,key,connection,FETCHER);
underTest.start();
in.waitForFetcher();
underTest.shutDown();
underTest.join();
// The interrupted fetch must clean up: stream closed and output aborted.
assertTrue(in.wasClosedProperly());
verify(immo).abort();
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@SuppressWarnings({"unchecked","deprecation"}) @Test(timeout=10000) public void testOnDiskMerger() throws IOException, URISyntaxException, InterruptedException {
JobConf jobConf=new JobConf();
final int SORT_FACTOR=5;
jobConf.setInt(MRJobConfig.IO_SORT_FACTOR,SORT_FACTOR);
MapOutputFile mapOutputFile=new MROutputFiles();
FileSystem fs=FileSystem.getLocal(jobConf);
MergeManagerImpl manager=new MergeManagerImpl(null,jobConf,fs,null,null,null,null,null,null,null,null,null,null,mapOutputFile);
MergeThread,IntWritable,IntWritable> onDiskMerger=(MergeThread,IntWritable,IntWritable>)Whitebox.getInternalState(manager,"onDiskMerger");
int mergeFactor=(Integer)Whitebox.getInternalState(onDiskMerger,"mergeFactor");
assertEquals(mergeFactor,SORT_FACTOR);
onDiskMerger.suspend();
Random rand=new Random();
for (int i=0; i < 2 * SORT_FACTOR; ++i) {
Path path=new Path("somePath");
CompressAwarePath cap=new CompressAwarePath(path,1l,rand.nextInt());
manager.closeOnDiskFile(cap);
}
LinkedList> pendingToBeMerged=(LinkedList>)Whitebox.getInternalState(onDiskMerger,"pendingToBeMerged");
assertTrue("No inputs were added to list pending to merge",pendingToBeMerged.size() > 0);
for (int i=0; i < pendingToBeMerged.size(); ++i) {
List inputs=pendingToBeMerged.get(i);
for (int j=1; j < inputs.size(); ++j) {
assertTrue("Not enough / too many inputs were going to be merged",inputs.size() > 0 && inputs.size() <= SORT_FACTOR);
assertTrue("Inputs to be merged were not sorted according to size: ",inputs.get(j).getCompressedSize() >= inputs.get(j - 1).getCompressedSize());
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A successful job must leave the CustomOutputCommitter's setup/commit marker
 * files present and the abort markers absent, at both job and task level.
 */
@Test public void testJobSucceed() throws IOException, InterruptedException, ClassNotFoundException {
LOG.info("\n\n\nStarting testJobSucceed().");
// Skip (not fail) when the MR app jar is unavailable in this build.
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
return;
}
JobConf conf=new JobConf(mrCluster.getConfig());
Path in=new Path(mrCluster.getTestWorkDir().getAbsolutePath(),"in");
Path out=new Path(mrCluster.getTestWorkDir().getAbsolutePath(),"out");
runJobSucceed(conf,in,out);
FileSystem fs=FileSystem.get(conf);
// Setup and commit markers present; abort markers absent.
Assert.assertTrue(fs.exists(new Path(out,CustomOutputCommitter.JOB_SETUP_FILE_NAME)));
Assert.assertFalse(fs.exists(new Path(out,CustomOutputCommitter.JOB_ABORT_FILE_NAME)));
Assert.assertTrue(fs.exists(new Path(out,CustomOutputCommitter.JOB_COMMIT_FILE_NAME)));
Assert.assertTrue(fs.exists(new Path(out,CustomOutputCommitter.TASK_SETUP_FILE_NAME)));
Assert.assertFalse(fs.exists(new Path(out,CustomOutputCommitter.TASK_ABORT_FILE_NAME)));
Assert.assertTrue(fs.exists(new Path(out,CustomOutputCommitter.TASK_COMMIT_FILE_NAME)));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies that a failing job drives the custom output committer through its
 * setup and abort hooks — and never its commit hooks — at both job and task
 * scope, by checking for the marker files each hook writes.
 */
@Test
public void testJobFail() throws IOException, InterruptedException,
    ClassNotFoundException {
  LOG.info("\n\n\nStarting testJobFail().");
  File appJar = new File(MiniMRYarnCluster.APPJAR);
  if (!appJar.exists()) {
    // Without the MRAppJar the mini cluster cannot launch the job.
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  JobConf jobConf = new JobConf(mrCluster.getConfig());
  String workDir = mrCluster.getTestWorkDir().getAbsolutePath();
  Path inputPath = new Path(workDir, "fail-in");
  Path outputPath = new Path(workDir, "fail-out");
  runJobFail(jobConf, inputPath, outputPath);
  FileSystem fileSystem = FileSystem.get(jobConf);
  // Failure path: setup + abort markers exist, commit markers do not.
  Assert.assertTrue(fileSystem.exists(
      new Path(outputPath, CustomOutputCommitter.JOB_SETUP_FILE_NAME)));
  Assert.assertTrue(fileSystem.exists(
      new Path(outputPath, CustomOutputCommitter.JOB_ABORT_FILE_NAME)));
  Assert.assertFalse(fileSystem.exists(
      new Path(outputPath, CustomOutputCommitter.JOB_COMMIT_FILE_NAME)));
  Assert.assertTrue(fileSystem.exists(
      new Path(outputPath, CustomOutputCommitter.TASK_SETUP_FILE_NAME)));
  Assert.assertTrue(fileSystem.exists(
      new Path(outputPath, CustomOutputCommitter.TASK_ABORT_FILE_NAME)));
  Assert.assertFalse(fileSystem.exists(
      new Path(outputPath, CustomOutputCommitter.TASK_COMMIT_FILE_NAME)));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Builds an RMNMInfo JMX bean from the live mini-cluster's RMContext and
 * scheduler, then parses its live-node-managers JSON report and checks that
 * every expected field is present for each RUNNING, idle node.
 */
@Test public void testRMNMInfo() throws Exception {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
// Without the MRAppJar the mini cluster cannot run; skip quietly.
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
return;
}
// Construct the bean directly from the running RM's internals.
RMContext rmc=mrCluster.getResourceManager().getRMContext();
ResourceScheduler rms=mrCluster.getResourceManager().getResourceScheduler();
RMNMInfo rmInfo=new RMNMInfo(rmc,rms);
String liveNMs=rmInfo.getLiveNodeManagers();
// The report is a JSON array with one object per node manager.
ObjectMapper mapper=new ObjectMapper();
JsonNode jn=mapper.readTree(liveNMs);
Assert.assertEquals("Unexpected number of live nodes:",NUMNODEMANAGERS,jn.size());
Iterator it=jn.iterator();
while (it.hasNext()) {
JsonNode n=it.next();
// Every per-node record must expose the full set of reporting fields.
Assert.assertNotNull(n.get("HostName"));
Assert.assertNotNull(n.get("Rack"));
Assert.assertTrue("Node " + n.get("NodeId") + " should be RUNNING",n.get("State").asText().contains("RUNNING"));
Assert.assertNotNull(n.get("NodeHTTPAddress"));
Assert.assertNotNull(n.get("LastHealthUpdate"));
Assert.assertNotNull(n.get("HealthReport"));
Assert.assertNotNull(n.get("NodeManagerVersion"));
Assert.assertNotNull(n.get("NumContainers"));
// No job has run, so the nodes must be idle: zero containers, zero memory.
Assert.assertEquals(n.get("NodeId") + ": Unexpected number of used containers",0,n.get("NumContainers").asInt());
Assert.assertEquals(n.get("NodeId") + ": Unexpected amount of used memory",0,n.get("UsedMemoryMB").asInt());
Assert.assertNotNull(n.get("AvailableMemoryMB"));
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises RMNMInfo when the RMContext knows a node that the scheduler does
 * not (a mocked scheduler returns no scheduler report for it). The identity
 * fields must still be reported, while the scheduler-derived usage fields
 * (NumContainers, UsedMemoryMB, AvailableMemoryMB) must be absent.
 */
@Test public void testRMNMInfoMissmatch() throws Exception {
// Mock RM internals: one node registered in the context, none known to the
// scheduler — stubbing must happen before RMNMInfo reads the map.
RMContext rmc=mock(RMContext.class);
ResourceScheduler rms=mock(ResourceScheduler.class);
ConcurrentMap map=new ConcurrentHashMap();
RMNode node=MockNodes.newNodeInfo(1,MockNodes.newResource(4 * 1024));
map.put(node.getNodeID(),node);
when(rmc.getRMNodes()).thenReturn(map);
RMNMInfo rmInfo=new RMNMInfo(rmc,rms);
String liveNMs=rmInfo.getLiveNodeManagers();
ObjectMapper mapper=new ObjectMapper();
JsonNode jn=mapper.readTree(liveNMs);
Assert.assertEquals("Unexpected number of live nodes:",1,jn.size());
Iterator it=jn.iterator();
while (it.hasNext()) {
JsonNode n=it.next();
// Identity/health fields come from the RMNode and are always present.
Assert.assertNotNull(n.get("HostName"));
Assert.assertNotNull(n.get("Rack"));
Assert.assertTrue("Node " + n.get("NodeId") + " should be RUNNING",n.get("State").asText().contains("RUNNING"));
Assert.assertNotNull(n.get("NodeHTTPAddress"));
Assert.assertNotNull(n.get("LastHealthUpdate"));
Assert.assertNotNull(n.get("HealthReport"));
Assert.assertNotNull(n.get("NodeManagerVersion"));
// Usage fields come from the scheduler report, which is missing here.
Assert.assertNull(n.get("NumContainers"));
Assert.assertNull(n.get("UsedMemoryMB"));
Assert.assertNull(n.get("AvailableMemoryMB"));
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Checks equals/compareTo/hashCode/toString consistency for
 * {@code TaskAttemptId} values that differ in task type, task number,
 * attempt number, and timestamp.
 */
@Test
public void testTaskAttemptId() {
  final long ts1 = 1315890136000L;
  final long ts2 = 1315890136001L;
  TaskAttemptId mapAttempt = createTaskAttemptId(ts1, 2, 2, TaskType.MAP, 2);
  TaskAttemptId reduceAttempt = createTaskAttemptId(ts1, 2, 2, TaskType.REDUCE, 2);
  TaskAttemptId laterAttempt = createTaskAttemptId(ts1, 2, 2, TaskType.MAP, 3);
  TaskAttemptId earlierAttempt = createTaskAttemptId(ts1, 2, 2, TaskType.MAP, 1);
  TaskAttemptId earlierTask = createTaskAttemptId(ts1, 2, 1, TaskType.MAP, 3);
  TaskAttemptId duplicate = createTaskAttemptId(ts1, 2, 2, TaskType.MAP, 2);
  // equals: identical fields compare equal; any differing field does not.
  assertTrue(mapAttempt.equals(duplicate));
  assertFalse(mapAttempt.equals(reduceAttempt));
  assertFalse(mapAttempt.equals(laterAttempt));
  assertFalse(mapAttempt.equals(earlierTask));
  // compareTo ordering must be consistent with equals.
  assertTrue(mapAttempt.compareTo(duplicate) == 0);
  assertTrue(mapAttempt.compareTo(reduceAttempt) < 0);
  assertTrue(mapAttempt.compareTo(laterAttempt) < 0);
  assertTrue(mapAttempt.compareTo(earlierAttempt) > 0);
  assertTrue(mapAttempt.compareTo(earlierTask) > 0);
  // hashCode must agree with equals for the equal pair, differ otherwise.
  assertTrue(mapAttempt.hashCode() == duplicate.hashCode());
  assertFalse(mapAttempt.hashCode() == reduceAttempt.hashCode());
  assertFalse(mapAttempt.hashCode() == laterAttempt.hashCode());
  assertFalse(mapAttempt.hashCode() == earlierTask.hashCode());
  // toString: small ids are zero-padded, large ids are printed in full.
  TaskAttemptId bigIds = createTaskAttemptId(ts2, 5463346, 4326575, TaskType.REDUCE, 54375);
  assertEquals("attempt_" + ts1 + "_0002_m_000002_2", mapAttempt.toString());
  assertEquals("attempt_" + ts2 + "_5463346_r_4326575_54375", bigIds.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Checks equals/compareTo/hashCode/toString consistency for {@code TaskId}
 * values that differ in task type, task number, and timestamp.
 */
@Test
public void testTaskId() {
  final long ts1 = 1315890136000L;
  final long ts2 = 1315890136001L;
  TaskId mapTask = createTaskId(ts1, 1, 2, TaskType.MAP);
  TaskId reduceTask = createTaskId(ts1, 1, 2, TaskType.REDUCE);
  TaskId earlierTask = createTaskId(ts1, 1, 1, TaskType.MAP);
  TaskId duplicate = createTaskId(ts1, 1, 2, TaskType.MAP);
  TaskId laterCluster = createTaskId(ts2, 1, 1, TaskType.MAP);
  // equals: identical fields compare equal; any differing field does not.
  assertTrue(mapTask.equals(duplicate));
  assertFalse(mapTask.equals(reduceTask));
  assertFalse(mapTask.equals(earlierTask));
  assertFalse(mapTask.equals(laterCluster));
  // compareTo ordering must be consistent with equals.
  assertTrue(mapTask.compareTo(duplicate) == 0);
  assertTrue(mapTask.compareTo(reduceTask) < 0);
  assertTrue(mapTask.compareTo(earlierTask) > 0);
  assertTrue(mapTask.compareTo(laterCluster) < 0);
  // hashCode must agree with equals for the equal pair, differ otherwise.
  assertTrue(mapTask.hashCode() == duplicate.hashCode());
  assertFalse(mapTask.hashCode() == reduceTask.hashCode());
  assertFalse(mapTask.hashCode() == earlierTask.hashCode());
  assertFalse(mapTask.hashCode() == laterCluster.hashCode());
  // toString: small ids are zero-padded, large ids are printed in full.
  TaskId bigIds = createTaskId(ts1, 324151, 54643747, TaskType.REDUCE);
  assertEquals("task_" + ts1 + "_0001_m_000002", mapTask.toString());
  assertEquals("task_" + ts1 + "_324151_r_54643747", bigIds.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Checks equals/compareTo/hashCode/toString consistency for {@code JobId}
 * values that differ in job number and timestamp.
 */
@Test
public void testJobId() {
  final long ts1 = 1315890136000L;
  final long ts2 = 1315890136001L;
  JobId secondJob = createJobId(ts1, 2);
  JobId firstJob = createJobId(ts1, 1);
  JobId laterCluster = createJobId(ts2, 1);
  JobId duplicate = createJobId(ts1, 2);
  // equals: identical fields compare equal; any differing field does not.
  assertTrue(secondJob.equals(duplicate));
  assertFalse(secondJob.equals(firstJob));
  assertFalse(secondJob.equals(laterCluster));
  // compareTo ordering must be consistent with equals.
  assertTrue(secondJob.compareTo(duplicate) == 0);
  assertTrue(secondJob.compareTo(firstJob) > 0);
  assertTrue(secondJob.compareTo(laterCluster) < 0);
  // hashCode must agree with equals for the equal pair, differ otherwise.
  assertTrue(secondJob.hashCode() == duplicate.hashCode());
  assertFalse(secondJob.hashCode() == firstJob.hashCode());
  assertFalse(secondJob.hashCode() == laterCluster.hashCode());
  // toString: small ids are zero-padded, large ids are printed in full.
  JobId bigId = createJobId(ts1, 231415);
  assertEquals("job_" + ts1 + "_0002", secondJob.toString());
  assertEquals("job_" + ts1 + "_231415", bigId.toString());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Runs an AM, stops it mid-task, then starts a second AM with recovery
 * disabled. Verifies the second AM still sees both AMInfo records and that
 * the first AM's recorded start time is preserved across the restart.
 */
@Test public void testAMInfosWithoutRecoveryEnabled() throws Exception {
int runCount=0;
// First AM run (attempt #1) with history logging enabled.
MRApp app=new MRAppWithHistory(1,0,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
// Remember the first AM's start time so we can compare after restart.
long am1StartTime=app.getAllAMInfos().get(0).getStartTime();
Assert.assertEquals("No of tasks not correct",1,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask=it.next();
app.waitForState(mapTask,TaskState.RUNNING);
TaskAttempt taskAttempt=mapTask.getAttempts().values().iterator().next();
app.waitForState(taskAttempt,TaskAttemptState.RUNNING);
// Kill the first AM while the attempt is still in flight.
app.stop();
// Second AM run (attempt #2) with recovery explicitly disabled.
app=new MRAppWithHistory(1,0,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,false);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",1,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask=it.next();
// Even without recovery, both AM generations must be reported and the
// first AM's start time must match what we observed before the restart.
List amInfos=app.getAllAMInfos();
Assert.assertEquals(2,amInfos.size());
AMInfo amInfoOne=amInfos.get(0);
Assert.assertEquals(am1StartTime,amInfoOne.getStartTime());
app.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Simulates a container completing before the task attempt ever launches in
 * it. With only one allowed map attempt, the orphaned container-completed
 * event must fail the attempt and hence the whole job.
 */
@Test public void testTaskFailWithUnusedContainer() throws Exception {
MRApp app=new MRAppWithFailingTaskAndUnusedContainer();
Configuration conf=new Configuration();
// One attempt only, so a single failure fails the job.
int maxAttempts=1;
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS,maxAttempts);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Map tasks=job.getTasks();
Assert.assertEquals("Num tasks is not correct",1,tasks.size());
Task task=tasks.values().iterator().next();
app.waitForState(task,TaskState.SCHEDULED);
Map attempts=tasks.values().iterator().next().getAttempts();
Assert.assertEquals("Num attempts is not correct",maxAttempts,attempts.size());
TaskAttempt attempt=attempts.values().iterator().next();
// Wait until the attempt is ASSIGNED (container allocated, not launched),
// then report the container completed out from under it.
app.waitForInternalState((TaskAttemptImpl)attempt,TaskAttemptStateInternal.ASSIGNED);
app.getDispatcher().getEventHandler().handle(new TaskAttemptEvent(attempt.getID(),TaskAttemptEventType.TA_CONTAINER_COMPLETED));
app.waitForState(job,JobState.FAILED);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Runs an app whose first task attempt always fails. Verifies the task is
 * retried and succeeds on the second attempt, leaving one FAILED and one
 * SUCCEEDED attempt and an overall SUCCEEDED job.
 */
@Test public void testFailTask() throws Exception {
MRApp app=new MockFirstFailingAttemptMRApp(1,0);
Configuration conf=new Configuration();
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
Job job=app.submit(conf);
// The retry masks the first failure, so the job as a whole succeeds.
app.waitForState(job,JobState.SUCCEEDED);
Map tasks=job.getTasks();
Assert.assertEquals("Num tasks is not correct",1,tasks.size());
Task task=tasks.values().iterator().next();
Assert.assertEquals("Task state not correct",TaskState.SUCCEEDED,task.getReport().getTaskState());
Map attempts=tasks.values().iterator().next().getAttempts();
Assert.assertEquals("Num attempts is not correct",2,attempts.size());
// Attempts iterate in order: the forced failure first, then the retry.
Iterator it=attempts.values().iterator();
Assert.assertEquals("Attempt state not correct",TaskAttemptState.FAILED,it.next().getReport().getTaskAttemptState());
Assert.assertEquals("Attempt state not correct",TaskAttemptState.SUCCEEDED,it.next().getReport().getTaskAttemptState());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Runs an app whose task attempts never report progress, so each attempt
 * times out. With two allowed attempts, both must end FAILED and the task
 * and job must fail.
 */
@Test public void testTimedOutTask() throws Exception {
MRApp app=new TimeOutTaskMRApp(1,0);
Configuration conf=new Configuration();
// Two attempts so we can verify that every retry also times out.
int maxAttempts=2;
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS,maxAttempts);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.FAILED);
Map tasks=job.getTasks();
Assert.assertEquals("Num tasks is not correct",1,tasks.size());
Task task=tasks.values().iterator().next();
Assert.assertEquals("Task state not correct",TaskState.FAILED,task.getReport().getTaskState());
Map attempts=tasks.values().iterator().next().getAttempts();
Assert.assertEquals("Num attempts is not correct",maxAttempts,attempts.size());
// Every attempt (the original and the retry) timed out and failed.
for ( TaskAttempt attempt : attempts.values()) {
Assert.assertEquals("Attempt state not correct",TaskAttemptState.FAILED,attempt.getReport().getTaskAttemptState());
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Fetch-failure handling with 1 map and 3 reduces: two fetch failures from a
 * single reducer are not enough to re-run the map while the other reducers
 * are still shuffling, but a third failure — once the other reducers have
 * moved to the REDUCE phase — fails the map attempt, re-runs the map, and
 * marks the original completion event OBSOLETE.
 */
@Test public void testFetchFailureMultipleReduces() throws Exception {
MRApp app=new MRApp(1,3,false,this.getClass().getName(),true);
Configuration conf=new Configuration();
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("Num tasks not correct",4,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask=it.next();
Task reduceTask=it.next();
Task reduceTask2=it.next();
Task reduceTask3=it.next();
// Drive the map to completion first.
app.waitForState(mapTask,TaskState.RUNNING);
TaskAttempt mapAttempt1=mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt1,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask,TaskState.SUCCEEDED);
TaskAttemptCompletionEvent[] events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Num completion events not correct",1,events.length);
Assert.assertEquals("Event status not correct",TaskAttemptCompletionEventStatus.SUCCEEDED,events[0].getStatus());
// Bring all three reducers into the SHUFFLE phase.
app.waitForState(reduceTask,TaskState.RUNNING);
app.waitForState(reduceTask2,TaskState.RUNNING);
app.waitForState(reduceTask3,TaskState.RUNNING);
TaskAttempt reduceAttempt=reduceTask.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt,TaskAttemptState.RUNNING);
updateStatus(app,reduceAttempt,Phase.SHUFFLE);
TaskAttempt reduceAttempt2=reduceTask2.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt2,TaskAttemptState.RUNNING);
updateStatus(app,reduceAttempt2,Phase.SHUFFLE);
TaskAttempt reduceAttempt3=reduceTask3.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt3,TaskAttemptState.RUNNING);
updateStatus(app,reduceAttempt3,Phase.SHUFFLE);
// Two failures from one reducer: map must stay SUCCEEDED.
sendFetchFailure(app,reduceAttempt,mapAttempt1);
sendFetchFailure(app,reduceAttempt,mapAttempt1);
assertEquals(TaskState.SUCCEEDED,mapTask.getState());
// Once the other reducers reach REDUCE, the third failure re-runs the map.
updateStatus(app,reduceAttempt2,Phase.REDUCE);
updateStatus(app,reduceAttempt3,Phase.REDUCE);
sendFetchFailure(app,reduceAttempt,mapAttempt1);
app.waitForState(mapTask,TaskState.RUNNING);
Assert.assertEquals("Map TaskAttempt state not correct",TaskAttemptState.FAILED,mapAttempt1.getState());
Assert.assertEquals("Num attempts in Map Task not correct",2,mapTask.getAttempts().size());
// Skip the failed attempt; drive the re-run attempt to completion.
Iterator atIt=mapTask.getAttempts().values().iterator();
atIt.next();
TaskAttempt mapAttempt2=atIt.next();
app.waitForState(mapAttempt2,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt2.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask,TaskState.SUCCEEDED);
// Complete all reducers and the job.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt.getID(),TaskAttemptEventType.TA_DONE));
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt2.getID(),TaskAttemptEventType.TA_DONE));
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt3.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
// The original map completion event was updated in place to OBSOLETE.
Assert.assertEquals("Event status not correct",TaskAttemptCompletionEventStatus.OBSOLETE,events[0].getStatus());
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Num completion events not correct",6,events.length);
Assert.assertEquals("Event map attempt id not correct",mapAttempt1.getID(),events[0].getAttemptId());
Assert.assertEquals("Event map attempt id not correct",mapAttempt1.getID(),events[1].getAttemptId());
Assert.assertEquals("Event map attempt id not correct",mapAttempt2.getID(),events[2].getAttemptId());
Assert.assertEquals("Event reduce attempt id not correct",reduceAttempt.getID(),events[3].getAttemptId());
Assert.assertEquals("Event status not correct for map attempt1",TaskAttemptCompletionEventStatus.OBSOLETE,events[0].getStatus());
Assert.assertEquals("Event status not correct for map attempt1",TaskAttemptCompletionEventStatus.FAILED,events[1].getStatus());
Assert.assertEquals("Event status not correct for map attempt2",TaskAttemptCompletionEventStatus.SUCCEEDED,events[2].getStatus());
Assert.assertEquals("Event status not correct for reduce attempt1",TaskAttemptCompletionEventStatus.SUCCEEDED,events[3].getStatus());
// Map-only completion events must be the map slice of the converted list.
TaskCompletionEvent mapEvents[]=job.getMapAttemptCompletionEvents(0,2);
TaskCompletionEvent convertedEvents[]=TypeConverter.fromYarn(events);
Assert.assertEquals("Incorrect number of map events",2,mapEvents.length);
Assert.assertArrayEquals("Unexpected map events",Arrays.copyOfRange(convertedEvents,0,2),mapEvents);
mapEvents=job.getMapAttemptCompletionEvents(2,200);
Assert.assertEquals("Incorrect number of map events",1,mapEvents.length);
Assert.assertEquals("Unexpected map event",convertedEvents[2],mapEvents[0]);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Fetch-failure handling with 1 map and 1 reduce: three fetch failures
 * reported by the reducer against the completed map attempt must fail that
 * attempt, re-run the map, and mark the original completion event OBSOLETE.
 * The final event log must show OBSOLETE/FAILED for the first map attempt,
 * SUCCEEDED for the re-run map and for the reduce.
 *
 * Fix: assertion message on the reduce-attempt-id check read "redude".
 */
@Test public void testFetchFailure() throws Exception {
MRApp app=new MRApp(1,1,false,this.getClass().getName(),true);
Configuration conf=new Configuration();
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("Num tasks not correct",2,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask=it.next();
Task reduceTask=it.next();
// Drive the map to completion first.
app.waitForState(mapTask,TaskState.RUNNING);
TaskAttempt mapAttempt1=mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt1,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask,TaskState.SUCCEEDED);
TaskAttemptCompletionEvent[] events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Num completion events not correct",1,events.length);
Assert.assertEquals("Event status not correct",TaskAttemptCompletionEventStatus.SUCCEEDED,events[0].getStatus());
app.waitForState(reduceTask,TaskState.RUNNING);
TaskAttempt reduceAttempt=reduceTask.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt,TaskAttemptState.RUNNING);
// Three fetch failures from the single reducer trigger the map re-run.
sendFetchFailure(app,reduceAttempt,mapAttempt1);
sendFetchFailure(app,reduceAttempt,mapAttempt1);
sendFetchFailure(app,reduceAttempt,mapAttempt1);
app.waitForState(mapTask,TaskState.RUNNING);
Assert.assertEquals("Map TaskAttempt state not correct",TaskAttemptState.FAILED,mapAttempt1.getState());
Assert.assertEquals("Num attempts in Map Task not correct",2,mapTask.getAttempts().size());
// Skip the failed attempt; drive the re-run attempt to completion.
Iterator atIt=mapTask.getAttempts().values().iterator();
atIt.next();
TaskAttempt mapAttempt2=atIt.next();
app.waitForState(mapAttempt2,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt2.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask,TaskState.SUCCEEDED);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
// The original map completion event was updated in place to OBSOLETE.
Assert.assertEquals("Event status not correct",TaskAttemptCompletionEventStatus.OBSOLETE,events[0].getStatus());
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Num completion events not correct",4,events.length);
Assert.assertEquals("Event map attempt id not correct",mapAttempt1.getID(),events[0].getAttemptId());
Assert.assertEquals("Event map attempt id not correct",mapAttempt1.getID(),events[1].getAttemptId());
Assert.assertEquals("Event map attempt id not correct",mapAttempt2.getID(),events[2].getAttemptId());
Assert.assertEquals("Event reduce attempt id not correct",reduceAttempt.getID(),events[3].getAttemptId());
Assert.assertEquals("Event status not correct for map attempt1",TaskAttemptCompletionEventStatus.OBSOLETE,events[0].getStatus());
Assert.assertEquals("Event status not correct for map attempt1",TaskAttemptCompletionEventStatus.FAILED,events[1].getStatus());
Assert.assertEquals("Event status not correct for map attempt2",TaskAttemptCompletionEventStatus.SUCCEEDED,events[2].getStatus());
Assert.assertEquals("Event status not correct for reduce attempt1",TaskAttemptCompletionEventStatus.SUCCEEDED,events[3].getStatus());
// Map-only completion events must be the map slice of the converted list.
TaskCompletionEvent mapEvents[]=job.getMapAttemptCompletionEvents(0,2);
TaskCompletionEvent convertedEvents[]=TypeConverter.fromYarn(events);
Assert.assertEquals("Incorrect number of map events",2,mapEvents.length);
Assert.assertArrayEquals("Unexpected map events",Arrays.copyOfRange(convertedEvents,0,2),mapEvents);
mapEvents=job.getMapAttemptCompletionEvents(2,200);
Assert.assertEquals("Incorrect number of map events",1,mapEvents.length);
Assert.assertEquals("Unexpected map event",convertedEvents[2],mapEvents[0]);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Kills a single task attempt while its task is blocked on a latch. The
 * killed attempt must be replaced by a new one so both tasks (and the job)
 * still SUCCEED; the killed task ends with one KILLED and one SUCCEEDED
 * attempt, the untouched task with a single SUCCEEDED attempt.
 */
@Test public void testKillTaskAttempt() throws Exception {
// Latch holds the app's tasks until we have sent the kill event.
final CountDownLatch latch=new CountDownLatch(1);
MRApp app=new BlockingMRApp(2,0,latch);
Job job=app.submit(new Configuration());
app.waitForState(job,JobState.RUNNING);
Map tasks=job.getTasks();
Assert.assertEquals("No of tasks is not correct",2,tasks.size());
Iterator it=tasks.values().iterator();
Task task1=it.next();
Task task2=it.next();
app.waitForState(task1,TaskState.SCHEDULED);
app.waitForState(task2,TaskState.SCHEDULED);
// Kill task1's only attempt while both tasks are still blocked.
TaskAttempt attempt=task1.getAttempts().values().iterator().next();
app.getContext().getEventHandler().handle(new TaskAttemptEvent(attempt.getID(),TaskAttemptEventType.TA_KILL));
// Release the blocked tasks and let the job run to completion.
latch.countDown();
app.waitForState(job,JobState.SUCCEEDED);
Assert.assertEquals("Task state not correct",TaskState.SUCCEEDED,task1.getReport().getTaskState());
Assert.assertEquals("Task state not correct",TaskState.SUCCEEDED,task2.getReport().getTaskState());
// Killed task: the killed attempt plus its automatic replacement.
Map attempts=task1.getAttempts();
Assert.assertEquals("No of attempts is not correct",2,attempts.size());
Iterator iter=attempts.values().iterator();
Assert.assertEquals("Attempt state not correct",TaskAttemptState.KILLED,iter.next().getReport().getTaskAttemptState());
Assert.assertEquals("Attempt state not correct",TaskAttemptState.SUCCEEDED,iter.next().getReport().getTaskAttemptState());
// Untouched task: a single successful attempt.
attempts=task2.getAttempts();
Assert.assertEquals("No of attempts is not correct",1,attempts.size());
iter=attempts.values().iterator();
Assert.assertEquals("Attempt state not correct",TaskAttemptState.SUCCEEDED,iter.next().getReport().getTaskAttemptState());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Kills a running job while its single task is blocked on a latch. The job,
 * its task, and the task's attempt must all end in the KILLED state.
 */
@Test public void testKillJob() throws Exception {
// Latch holds the task until the kill event has been dispatched.
final CountDownLatch latch=new CountDownLatch(1);
MRApp app=new BlockingMRApp(1,0,latch);
Job job=app.submit(new Configuration());
app.waitForState(job,JobState.RUNNING);
// Kill the whole job, then release the blocked task.
app.getContext().getEventHandler().handle(new JobEvent(job.getID(),JobEventType.JOB_KILL));
latch.countDown();
app.waitForState(job,JobState.KILLED);
// The kill must cascade: job -> task -> attempt all report KILLED.
Map tasks=job.getTasks();
Assert.assertEquals("No of tasks is not correct",1,tasks.size());
Task task=tasks.values().iterator().next();
Assert.assertEquals("Task state not correct",TaskState.KILLED,task.getReport().getTaskState());
Map attempts=tasks.values().iterator().next().getAttempts();
Assert.assertEquals("No of attempts is not correct",1,attempts.size());
Iterator it=attempts.values().iterator();
Assert.assertEquals("Attempt state not correct",TaskAttemptState.KILLED,it.next().getReport().getTaskAttemptState());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Kills one of two tasks while both are blocked on a latch. The killed task
 * and its attempt must end KILLED; the other task still SUCCEEDS, and the
 * job as a whole SUCCEEDS.
 */
@Test public void testKillTask() throws Exception {
// Latch holds the tasks until the kill event has been dispatched.
final CountDownLatch latch=new CountDownLatch(1);
MRApp app=new BlockingMRApp(2,0,latch);
Job job=app.submit(new Configuration());
app.waitForState(job,JobState.RUNNING);
Map tasks=job.getTasks();
Assert.assertEquals("No of tasks is not correct",2,tasks.size());
Iterator it=tasks.values().iterator();
Task task1=it.next();
Task task2=it.next();
// Kill only task1, then release both blocked tasks.
app.getContext().getEventHandler().handle(new TaskEvent(task1.getID(),TaskEventType.T_KILL));
latch.countDown();
// Killing a task does not fail the job; the remaining task completes it.
app.waitForState(job,JobState.SUCCEEDED);
Assert.assertEquals("Task state not correct",TaskState.KILLED,task1.getReport().getTaskState());
Assert.assertEquals("Task state not correct",TaskState.SUCCEEDED,task2.getReport().getTaskState());
// Task-level kill does not spawn a replacement attempt.
Map attempts=task1.getAttempts();
Assert.assertEquals("No of attempts is not correct",1,attempts.size());
Iterator iter=attempts.values().iterator();
Assert.assertEquals("Attempt state not correct",TaskAttemptState.KILLED,iter.next().getReport().getTaskAttemptState());
attempts=task2.getAttempts();
Assert.assertEquals("No of attempts is not correct",1,attempts.size());
iter=attempts.values().iterator();
Assert.assertEquals("Attempt state not correct",TaskAttemptState.SUCCEEDED,iter.next().getReport().getTaskAttemptState());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies the container allocated by the RM is passed through unchanged to
 * the container launcher: the launcher captures the container from the
 * ContainerRemoteLaunchEvent and it must be the very same object the task
 * attempt holds.
 */
@Test public void testContainerPassThrough() throws Exception {
// Override the launcher to capture the allocated container on launch.
MRApp app=new MRApp(0,1,true,this.getClass().getName(),true){
@Override protected ContainerLauncher createContainerLauncher( AppContext context){
return new MockContainerLauncher(){
@Override public void handle( ContainerLauncherEvent event){
if (event instanceof ContainerRemoteLaunchEvent) {
containerObtainedByContainerLauncher=((ContainerRemoteLaunchEvent)event).getAllocatedContainer();
}
super.handle(event);
}
}
;
}
}
;
Job job=app.submit(new Configuration());
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
Collection tasks=job.getTasks().values();
Collection taskAttempts=tasks.iterator().next().getAttempts().values();
TaskAttemptImpl taskAttempt=(TaskAttemptImpl)taskAttempts.iterator().next();
// Reference equality on purpose: the same Container instance must flow
// from allocation through to the launcher, not a copy.
Assert.assertTrue(taskAttempt.container == containerObtainedByContainerLauncher);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * The test verifies that the AM re-runs maps that have run on bad nodes. It
 * also verifies that the AM records all success/killed events so that reduces
 * are notified about map output status changes. It also verifies that the
 * re-run information is preserved across AM restart
 */
@Test public void testUpdatedNodes() throws Exception {
int runCount=0;
// First AM run (attempt #1) with history logging for later recovery.
MRApp app=new MRAppWithHistory(2,2,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
// Reduces start only after half the maps finish.
conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART,0.5f);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("Num tasks not correct",4,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
TaskAttempt task1Attempt=mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
// Both map attempts must land on the same node so one bad-node report
// covers them both.
NodeId node1=task1Attempt.getNodeId();
NodeId node2=task2Attempt.getNodeId();
Assert.assertEquals(node1,node2);
// Finish both maps successfully on that node.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task2Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.SUCCEEDED);
TaskAttemptCompletionEvent[] events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Expecting 2 completion events for success",2,events.length);
// Report the shared node as UNHEALTHY; both completed attempts must be
// killed and their maps rescheduled.
ArrayList updatedNodes=new ArrayList();
NodeReport nr=RecordFactoryProvider.getRecordFactory(null).newRecordInstance(NodeReport.class);
nr.setNodeId(node1);
nr.setNodeState(NodeState.UNHEALTHY);
updatedNodes.add(nr);
app.getContext().getEventHandler().handle(new JobUpdatedNodesEvent(job.getID(),updatedNodes));
app.waitForState(task1Attempt,TaskAttemptState.KILLED);
app.waitForState(task2Attempt,TaskAttemptState.KILLED);
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Expecting 2 more completion events for killed",4,events.length);
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
// Finish only map1's re-run attempt (second attempt in iteration order),
// leaving map2 still running across the AM restart.
Iterator itr=mapTask1.getAttempts().values().iterator();
itr.next();
task1Attempt=itr.next();
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.RUNNING);
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Expecting 1 more completion events for success",5,events.length);
// Restart the AM (attempt #2) with recovery enabled.
app.stop();
app=new MRAppWithHistory(2,2,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",4,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
Task reduceTask1=it.next();
Task reduceTask2=it.next();
// Recovery must restore map1's success and map2's incomplete state.
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.RUNNING);
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Expecting 2 completion events for killed & success of map1",2,events.length);
task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task2Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask2,TaskState.SUCCEEDED);
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Expecting 1 more completion events for success",3,events.length);
app.waitForState(reduceTask1,TaskState.RUNNING);
app.waitForState(reduceTask2,TaskState.RUNNING);
TaskAttempt task3Attempt=reduceTask1.getAttempts().values().iterator().next();
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task3Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(reduceTask1,TaskState.SUCCEEDED);
// A kill arriving after success must not change the reduce's final state.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task3Attempt.getID(),TaskAttemptEventType.TA_KILL));
app.waitForState(reduceTask1,TaskState.SUCCEEDED);
TaskAttempt task4Attempt=reduceTask2.getAttempts().values().iterator().next();
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task4Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(reduceTask2,TaskState.SUCCEEDED);
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Expecting 2 more completion events for reduce success",5,events.length);
app.waitForState(job,JobState.SUCCEEDED);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Pre-creates both the start-commit and end-commit-failure marker files in
 * the staging dir, so AM start-up sees a previous commit that failed. The AM
 * must abort with an IOException, shut down on error, and force the job into
 * the FAILED state (which the job history must also reflect).
 */
@Test public void testMRAppMasterFailLock() throws IOException, InterruptedException {
String applicationAttemptIdStr="appattempt_1317529182569_0004_000002";
String containerIdStr="container_1317529182569_0004_000002_1";
String userName="TestAppMasterUser";
JobConf conf=new JobConf();
conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir);
ApplicationAttemptId applicationAttemptId=ConverterUtils.toApplicationAttemptId(applicationAttemptIdStr);
JobId jobId=TypeConverter.toYarn(TypeConverter.fromYarn(applicationAttemptId.getApplicationId()));
// Both markers present => a prior commit attempt started and failed.
Path start=MRApps.getStartJobCommitFile(conf,userName,jobId);
Path end=MRApps.getEndJobCommitFailureFile(conf,userName,jobId);
FileSystem fs=FileSystem.get(conf);
fs.create(start).close();
fs.create(end).close();
ContainerId containerId=ConverterUtils.toContainerId(containerIdStr);
MRAppMaster appMaster=new MRAppMasterTest(applicationAttemptId,containerId,"host",-1,-1,System.currentTimeMillis(),false,false);
boolean caught=false;
try {
MRAppMaster.initAndStartAppMaster(appMaster,conf,userName);
}
catch ( IOException e) {
// Expected: start-up must refuse to proceed past the failed commit.
LOG.info("Caught expected Exception",e);
caught=true;
}
assertTrue(caught);
assertTrue(appMaster.errorHappenedShutDown);
assertEquals(JobStateInternal.FAILED,appMaster.forcedState);
appMaster.stop();
// The forced FAILED state must also be visible via the history handler.
verifyFailedStatus((MRAppMasterTest)appMaster,"FAILED");
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Removes the staging directory before AM start-up. The AM must fail with an
 * IOException, shut down on error, and force the job into the ERROR state.
 */
@Test
public void testMRAppMasterMissingStaging() throws IOException,
    InterruptedException {
  String attemptIdStr = "appattempt_1317529182569_0004_000002";
  String cIdStr = "container_1317529182569_0004_000002_1";
  String user = "TestAppMasterUser";
  JobConf jobConf = new JobConf();
  jobConf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  ApplicationAttemptId attemptId =
      ConverterUtils.toApplicationAttemptId(attemptIdStr);
  // Make sure the staging area really is absent before starting the AM.
  File stagingRoot = new File(stagingDir);
  if (stagingRoot.exists()) {
    FileUtils.deleteDirectory(stagingRoot);
  }
  ContainerId cId = ConverterUtils.toContainerId(cIdStr);
  MRAppMaster appMaster = new MRAppMasterTest(attemptId, cId, "host", -1, -1,
      System.currentTimeMillis(), false, false);
  boolean sawExpectedFailure = false;
  try {
    MRAppMaster.initAndStartAppMaster(appMaster, jobConf, user);
  } catch (IOException e) {
    LOG.info("Caught expected Exception", e);
    sawExpectedFailure = true;
  }
  assertTrue(sawExpectedFailure);
  assertTrue(appMaster.errorHappenedShutDown);
  assertEquals(JobStateInternal.ERROR, appMaster.forcedState);
  appMaster.stop();
}
APIUtilityVerifier IterativeVerifier EqualityVerifier
/**
 * Checks that {@code isLastAMRetry} is computed correctly across
 * successive AM starts; here every start is expected to report false.
 */
@Test(timeout=30000) public void testMRAppMasterMaxAppAttempts() throws IOException, InterruptedException {
  // Expected isLastAMRetry value for each successive AM start.
  Boolean[] expectedBools = new Boolean[]{false, false, false};
  String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
  String containerIdStr = "container_1317529182569_0004_000002_1";
  String userName = "TestAppMasterUser";
  ApplicationAttemptId applicationAttemptId =
      ConverterUtils.toApplicationAttemptId(applicationAttemptIdStr);
  ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
  JobConf conf = new JobConf();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  // Renamed from "stagingDir": the original local shadowed the class
  // field of the same name that is read on the line above, which was
  // confusing and error-prone.
  File stagingDirFile = new File(MRApps.getStagingAreaDir(conf, userName).toString());
  stagingDirFile.mkdirs();
  for (int i = 0; i < expectedBools.length; ++i) {
    MRAppMasterTest appMaster = new MRAppMasterTest(applicationAttemptId,
        containerId, "host", -1, -1, System.currentTimeMillis(), false, true);
    MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
    assertEquals("isLastAMRetry is correctly computed.",
        expectedBools[i], appMaster.isLastAMRetry());
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * Verifies that the AM resolves its staging path under the submitting
 * user's directory: {stagingDir}/{user}/.staging.
 */
@Test public void testMRAppMasterForDifferentUser() throws IOException, InterruptedException {
  final String attemptIdText = "appattempt_1317529182569_0004_000001";
  final String contIdText = "container_1317529182569_0004_000001_1";
  final String user = "TestAppMasterUser";
  ApplicationAttemptId attemptId =
      ConverterUtils.toApplicationAttemptId(attemptIdText);
  ContainerId contId = ConverterUtils.toContainerId(contIdText);
  MRAppMasterTest appMaster = new MRAppMasterTest(attemptId, contId, "host",
      -1, -1, System.currentTimeMillis());
  JobConf jobConf = new JobConf();
  jobConf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  MRAppMaster.initAndStartAppMaster(appMaster, jobConf, user);
  // Expected layout: <staging root>/<user>/.staging
  Path expectedStagingPath = new Path(new Path(stagingDir, user), ".staging");
  assertEquals(expectedStagingPath.toString(), appMaster.stagingDirPath.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Starts an AM that finds both the start-commit marker and the
 * end-commit-success marker already present in the staging area:
 * init must fail with an IOException and the AM must force a
 * SUCCEEDED shutdown.
 */
@Test public void testMRAppMasterSuccessLock() throws IOException, InterruptedException {
  final String attemptIdText = "appattempt_1317529182569_0004_000002";
  final String contIdText = "container_1317529182569_0004_000002_1";
  final String user = "TestAppMasterUser";
  JobConf jobConf = new JobConf();
  jobConf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  ApplicationAttemptId attemptId =
      ConverterUtils.toApplicationAttemptId(attemptIdText);
  JobId jobId = TypeConverter.toYarn(
      TypeConverter.fromYarn(attemptId.getApplicationId()));
  // Pre-create both commit markers so the AM sees a fully committed job.
  Path startMarker = MRApps.getStartJobCommitFile(jobConf, user, jobId);
  Path successMarker = MRApps.getEndJobCommitSuccessFile(jobConf, user, jobId);
  FileSystem fileSystem = FileSystem.get(jobConf);
  fileSystem.create(startMarker).close();
  fileSystem.create(successMarker).close();
  ContainerId contId = ConverterUtils.toContainerId(contIdText);
  MRAppMaster appMaster = new MRAppMasterTest(attemptId, contId, "host",
      -1, -1, System.currentTimeMillis(), false, false);
  boolean sawExpectedException = false;
  try {
    MRAppMaster.initAndStartAppMaster(appMaster, jobConf, user);
  } catch (IOException e) {
    LOG.info("Caught expected Exception", e);
    sawExpectedException = true;
  }
  assertTrue(sawExpectedException);
  assertTrue(appMaster.errorHappenedShutDown);
  assertEquals(JobStateInternal.SUCCEEDED, appMaster.forcedState);
  appMaster.stop();
  verifyFailedStatus((MRAppMasterTest) appMaster, "SUCCEEDED");
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testMRAppMasterCredentials() throws Exception {
Logger rootLogger=LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
Credentials credentials=new Credentials();
byte[] identifier="MyIdentifier".getBytes();
byte[] password="MyPassword".getBytes();
Text kind=new Text("MyTokenKind");
Text service=new Text("host:port");
Token extends TokenIdentifier> myToken=new Token(identifier,password,kind,service);
Text tokenAlias=new Text("myToken");
credentials.addToken(tokenAlias,myToken);
Text appTokenService=new Text("localhost:0");
Token appToken=new Token(identifier,password,AMRMTokenIdentifier.KIND_NAME,appTokenService);
credentials.addToken(appTokenService,appToken);
Text keyAlias=new Text("mySecretKeyAlias");
credentials.addSecretKey(keyAlias,"mySecretKey".getBytes());
Token extends TokenIdentifier> storedToken=credentials.getToken(tokenAlias);
JobConf conf=new JobConf();
Path tokenFilePath=new Path(testDir.getAbsolutePath(),"tokens-file");
Map newEnv=new HashMap();
newEnv.put(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION,tokenFilePath.toUri().getPath());
setNewEnvironmentHack(newEnv);
credentials.writeTokenStorageFile(tokenFilePath,conf);
ApplicationId appId=ApplicationId.newInstance(12345,56);
ApplicationAttemptId applicationAttemptId=ApplicationAttemptId.newInstance(appId,1);
ContainerId containerId=ContainerId.newInstance(applicationAttemptId,546);
String userName=UserGroupInformation.getCurrentUser().getShortUserName();
File stagingDir=new File(MRApps.getStagingAreaDir(conf,userName).toString());
stagingDir.mkdirs();
UserGroupInformation.setLoginUser(null);
MRAppMasterTest appMaster=new MRAppMasterTest(applicationAttemptId,containerId,"host",-1,-1,System.currentTimeMillis(),false,true);
MRAppMaster.initAndStartAppMaster(appMaster,conf,userName);
Credentials appMasterCreds=appMaster.getCredentials();
Assert.assertNotNull(appMasterCreds);
Assert.assertEquals(1,appMasterCreds.numberOfSecretKeys());
Assert.assertEquals(1,appMasterCreds.numberOfTokens());
Token extends TokenIdentifier> usedToken=appMasterCreds.getToken(tokenAlias);
Assert.assertNotNull(usedToken);
Assert.assertEquals(storedToken,usedToken);
byte[] usedKey=appMasterCreds.getSecretKey(keyAlias);
Assert.assertNotNull(usedKey);
Assert.assertEquals("mySecretKey",new String(usedKey));
Credentials confCredentials=conf.getCredentials();
Assert.assertEquals(1,confCredentials.numberOfSecretKeys());
Assert.assertEquals(1,confCredentials.numberOfTokens());
Assert.assertEquals(storedToken,confCredentials.getToken(tokenAlias));
Assert.assertEquals("mySecretKey",new String(confCredentials.getSecretKey(keyAlias)));
Credentials ugiCredentials=appMaster.getUgi().getCredentials();
Assert.assertEquals(1,ugiCredentials.numberOfSecretKeys());
Assert.assertEquals(2,ugiCredentials.numberOfTokens());
Assert.assertEquals(storedToken,ugiCredentials.getToken(tokenAlias));
Assert.assertEquals(appToken,ugiCredentials.getToken(appTokenService));
Assert.assertEquals("mySecretKey",new String(ugiCredentials.getSecretKey(keyAlias)));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Starts an AM that finds only the start-commit marker (commit began
 * but never finished): init must fail with an IOException, the AM
 * forces an ERROR shutdown, and the job is reported FAILED.
 */
@Test public void testMRAppMasterMidLock() throws IOException, InterruptedException {
  final String attemptIdText = "appattempt_1317529182569_0004_000002";
  final String contIdText = "container_1317529182569_0004_000002_1";
  final String user = "TestAppMasterUser";
  JobConf jobConf = new JobConf();
  jobConf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  ApplicationAttemptId attemptId =
      ConverterUtils.toApplicationAttemptId(attemptIdText);
  JobId jobId = TypeConverter.toYarn(
      TypeConverter.fromYarn(attemptId.getApplicationId()));
  // Create only the start marker: simulates a crash mid-commit.
  Path startMarker = MRApps.getStartJobCommitFile(jobConf, user, jobId);
  FileSystem fileSystem = FileSystem.get(jobConf);
  fileSystem.create(startMarker).close();
  ContainerId contId = ConverterUtils.toContainerId(contIdText);
  MRAppMaster appMaster = new MRAppMasterTest(attemptId, contId, "host",
      -1, -1, System.currentTimeMillis(), false, false);
  boolean sawExpectedException = false;
  try {
    MRAppMaster.initAndStartAppMaster(appMaster, jobConf, user);
  } catch (IOException e) {
    LOG.info("Caught expected Exception", e);
    sawExpectedException = true;
  }
  assertTrue(sawExpectedException);
  assertTrue(appMaster.errorHappenedShutDown);
  assertEquals(JobStateInternal.ERROR, appMaster.forcedState);
  appMaster.stop();
  verifyFailedStatus((MRAppMasterTest) appMaster, "FAILED");
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * End-to-end exercise of the MRClientProtocol service exposed by the AM:
 * submits a 1-map/0-reduce job, pushes a diagnostic and a status update
 * for the running attempt, then queries every client RPC (counters,
 * job/task/attempt reports, completion events, diagnostics, task report
 * lists) through a YARN RPC proxy, and finally drives the job to
 * SUCCEEDED.
 */
@Test public void test() throws Exception {
MRAppWithClientService app=new MRAppWithClientService(1,0,false);
Configuration conf=new Configuration();
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("Num tasks not correct",1,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task task=it.next();
app.waitForState(task,TaskState.RUNNING);
TaskAttempt attempt=task.getAttempts().values().iterator().next();
app.waitForState(attempt,TaskAttemptState.RUNNING);
String diagnostic1="Diagnostic1";
// NOTE(review): diagnostic2 is declared but never used in this method.
String diagnostic2="Diagnostic2";
// Inject a diagnostic message and a status update for the attempt so
// the RPC queries below have data to return.
app.getContext().getEventHandler().handle(new TaskAttemptDiagnosticsUpdateEvent(attempt.getID(),diagnostic1));
TaskAttemptStatus taskAttemptStatus=new TaskAttemptStatus();
taskAttemptStatus.id=attempt.getID();
taskAttemptStatus.progress=0.5f;
taskAttemptStatus.stateString="RUNNING";
taskAttemptStatus.taskState=TaskAttemptState.RUNNING;
taskAttemptStatus.phase=Phase.MAP;
app.getContext().getEventHandler().handle(new TaskAttemptStatusUpdateEvent(attempt.getID(),taskAttemptStatus));
// Talk to the AM's client service over real YARN RPC, like a job client would.
YarnRPC rpc=YarnRPC.create(conf);
MRClientProtocol proxy=(MRClientProtocol)rpc.getProxy(MRClientProtocol.class,app.clientService.getBindAddress(),conf);
GetCountersRequest gcRequest=recordFactory.newRecordInstance(GetCountersRequest.class);
gcRequest.setJobId(job.getID());
Assert.assertNotNull("Counters is null",proxy.getCounters(gcRequest).getCounters());
GetJobReportRequest gjrRequest=recordFactory.newRecordInstance(GetJobReportRequest.class);
gjrRequest.setJobId(job.getID());
JobReport jr=proxy.getJobReport(gjrRequest).getJobReport();
verifyJobReport(jr);
GetTaskAttemptCompletionEventsRequest gtaceRequest=recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsRequest.class);
gtaceRequest.setJobId(job.getID());
gtaceRequest.setFromEventId(0);
gtaceRequest.setMaxEvents(10);
Assert.assertNotNull("TaskCompletionEvents is null",proxy.getTaskAttemptCompletionEvents(gtaceRequest).getCompletionEventList());
GetDiagnosticsRequest gdRequest=recordFactory.newRecordInstance(GetDiagnosticsRequest.class);
gdRequest.setTaskAttemptId(attempt.getID());
Assert.assertNotNull("Diagnostics is null",proxy.getDiagnostics(gdRequest).getDiagnosticsList());
GetTaskAttemptReportRequest gtarRequest=recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
gtarRequest.setTaskAttemptId(attempt.getID());
TaskAttemptReport tar=proxy.getTaskAttemptReport(gtarRequest).getTaskAttemptReport();
verifyTaskAttemptReport(tar);
GetTaskReportRequest gtrRequest=recordFactory.newRecordInstance(GetTaskReportRequest.class);
gtrRequest.setTaskId(task.getID());
Assert.assertNotNull("TaskReport is null",proxy.getTaskReport(gtrRequest).getTaskReport());
// Task report lists must be non-null for both task types, even though
// this job has no reducers.
GetTaskReportsRequest gtreportsRequest=recordFactory.newRecordInstance(GetTaskReportsRequest.class);
gtreportsRequest.setJobId(job.getID());
gtreportsRequest.setTaskType(TaskType.MAP);
Assert.assertNotNull("TaskReports for map is null",proxy.getTaskReports(gtreportsRequest).getTaskReportList());
gtreportsRequest=recordFactory.newRecordInstance(GetTaskReportsRequest.class);
gtreportsRequest.setJobId(job.getID());
gtreportsRequest.setTaskType(TaskType.REDUCE);
Assert.assertNotNull("TaskReports for reduce is null",proxy.getTaskReports(gtreportsRequest).getTaskReportList());
// The diagnostic injected above must come back through both the
// diagnostics RPC and the task report.
List diag=proxy.getDiagnostics(gdRequest).getDiagnosticsList();
Assert.assertEquals("Num diagnostics not correct",1,diag.size());
Assert.assertEquals("Diag 1 not correct",diagnostic1,diag.get(0).toString());
TaskReport taskReport=proxy.getTaskReport(gtrRequest).getTaskReport();
Assert.assertEquals("Num diagnostics not correct",1,taskReport.getDiagnosticsCount());
// Finish the lone attempt and let the job complete.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Recovery of committed output across an AM restart: generation 1 runs
 * the map and the first reduce to completion (writing its output), the
 * AM is stopped, and generation 2 (with recovery enabled) must see both
 * as already SUCCEEDED — including the recovered shuffle port 5467 —
 * and only run the remaining second reduce before the job succeeds.
 */
@Test public void testOutputRecovery() throws Exception {
int runCount=0;
// Generation 1: 1 map, 2 reduces, history enabled.
MRApp app=new MRAppWithHistory(1,2,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task reduceTask1=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
TaskAttempt task1Attempt1=mapTask1.getAttempts().values().iterator().next();
app.waitForState(task1Attempt1,TaskAttemptState.RUNNING);
// Complete the map and record its shuffle port for later comparison.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
Assert.assertEquals(5467,task1Attempt1.getShufflePort());
// Run the first reduce to completion, writing real output to OUTDIR.
app.waitForState(reduceTask1,TaskState.RUNNING);
TaskAttempt reduce1Attempt1=reduceTask1.getAttempts().values().iterator().next();
writeOutput(reduce1Attempt1,conf);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduce1Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(reduceTask1,TaskState.SUCCEEDED);
// Simulate an AM crash.
app.stop();
// Generation 2: same job, recovery enabled.
app=new MRAppWithHistory(1,2,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
reduceTask1=it.next();
Task reduceTask2=it.next();
// The map and first reduce must be recovered, not re-run; the shuffle
// port must survive recovery.
app.waitForState(mapTask1,TaskState.SUCCEEDED);
task1Attempt1=mapTask1.getAttempts().values().iterator().next();
Assert.assertEquals(5467,task1Attempt1.getShufflePort());
app.waitForState(reduceTask1,TaskState.SUCCEEDED);
// Only the second reduce actually runs in this generation.
app.waitForState(reduceTask2,TaskState.RUNNING);
TaskAttempt reduce2Attempt=reduceTask2.getAttempts().values().iterator().next();
app.waitForState(reduce2Attempt,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduce2Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(reduceTask2,TaskState.SUCCEEDED);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
validateOutput();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * AM restart when no shuffle secret is available (MRAppNoShuffleSecret):
 * generation 1 completes the first map only; generation 2 — even with
 * recovery enabled — must NOT recover it, so both maps are expected back
 * in RUNNING state and are re-driven to completion along with the reduce.
 */
@Test(timeout=30000) public void testRecoveryWithoutShuffleSecret() throws Exception {
int runCount=0;
// Generation 1: 2 maps, 1 reduce, no shuffle secret in the app.
MRApp app=new MRAppNoShuffleSecret(2,1,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
Task reduceTask=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
TaskAttempt task1Attempt=mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task1Attempt,TaskAttemptState.RUNNING);
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
Assert.assertEquals("Reduce Task state not correct",TaskState.RUNNING,reduceTask.getReport().getTaskState());
// Complete only the first map, then crash the AM.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.stop();
// Generation 2: recovery requested, but without the shuffle secret it
// cannot be honored.
app=new MRAppNoShuffleSecret(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
reduceTask=it.next();
// Both maps must be RUNNING again — the previously completed mapTask1
// was not recovered.
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.waitForState(mapTask1,TaskState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask1.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(reduceTask,TaskState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceTask.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Recovery when only map tasks completed before the crash: generation 1
 * completes the first map (writing deliberately bad task output via
 * writeBadOutput); generation 2 recovers that map (shuffle port 5467
 * preserved), then runs the second map and the reduce to completion and
 * validates the final committed output.
 */
@Test public void testOutputRecoveryMapsOnly() throws Exception {
int runCount=0;
// Generation 1: 2 maps, 1 reduce, history enabled.
MRApp app=new MRAppWithHistory(2,1,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
Task reduceTask1=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
TaskAttempt task1Attempt1=mapTask1.getAttempts().values().iterator().next();
app.waitForState(task1Attempt1,TaskAttemptState.RUNNING);
// Write bad output for the map attempt before completing it; only the
// reduce output written later should end up committed.
writeBadOutput(task1Attempt1,conf);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
Assert.assertEquals(5467,task1Attempt1.getShufflePort());
// Simulate an AM crash after the first map finishes.
app.stop();
// Generation 2: recovery enabled.
app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
reduceTask1=it.next();
// First map recovered as SUCCEEDED with its shuffle port intact.
app.waitForState(mapTask1,TaskState.SUCCEEDED);
task1Attempt1=mapTask1.getAttempts().values().iterator().next();
Assert.assertEquals(5467,task1Attempt1.getShufflePort());
// Second map runs in this generation.
app.waitForState(mapTask2,TaskState.RUNNING);
TaskAttempt task2Attempt1=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task2Attempt1,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task2Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask2,TaskState.SUCCEEDED);
Assert.assertEquals(5467,task2Attempt1.getShufflePort());
// Reduce writes the real output, then the job completes.
app.waitForState(reduceTask1,TaskState.RUNNING);
TaskAttempt reduce1Attempt1=reduceTask1.getAttempts().values().iterator().next();
writeOutput(reduce1Attempt1,conf);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduce1Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(reduceTask1,TaskState.SUCCEEDED);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
validateOutput();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * AM with 2 maps and 1 reduce. A speculative attempt is added for the
 * 1st map (T_ADD_SPEC_ATTEMPT); both attempts reach RUNNING and the
 * original attempt succeeds. The AM is then stopped, and the second
 * generation recovers the completed map — preserving the job start
 * time, the task start/finish times, and both generations' AMInfos —
 * before finishing the remaining tasks.
 *
 * (Previous comment here described failed/killed attempts; that text
 * belonged to testCrashed, not this test.)
 * @throws Exception
 */
@Test public void testSpeculative() throws Exception {
int runCount=0;
long am1StartTimeEst=System.currentTimeMillis();
// Generation 1: 2 maps, 1 reduce, history enabled.
MRApp app=new MRAppWithHistory(2,1,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
long jobStartTime=job.getReport().getStartTime();
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
Task reduceTask=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
// Request a speculative attempt for the first map and poll (up to
// ~10s) until it shows up.
app.getContext().getEventHandler().handle(new TaskEvent(mapTask1.getID(),TaskEventType.T_ADD_SPEC_ATTEMPT));
int timeOut=0;
while (mapTask1.getAttempts().size() != 2 && timeOut++ < 10) {
Thread.sleep(1000);
LOG.info("Waiting for next attempt to start");
}
Iterator t1it=mapTask1.getAttempts().values().iterator();
TaskAttempt task1Attempt1=t1it.next();
TaskAttempt task1Attempt2=t1it.next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
ContainerId t1a2contId=task1Attempt2.getAssignedContainerID();
LOG.info(t1a2contId.toString());
LOG.info(task1Attempt1.getID().toString());
LOG.info(task1Attempt2.getID().toString());
// Launch the speculative attempt's container so it reaches RUNNING.
app.getContext().getEventHandler().handle(new TaskAttemptContainerLaunchedEvent(task1Attempt2.getID(),runCount));
app.waitForState(task1Attempt1,TaskAttemptState.RUNNING);
app.waitForState(task1Attempt2,TaskAttemptState.RUNNING);
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
Assert.assertEquals("Reduce Task state not correct",TaskState.RUNNING,reduceTask.getReport().getTaskState());
// The original attempt finishes first; record the task's timestamps
// to verify they survive recovery.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(task1Attempt1,TaskAttemptState.SUCCEEDED);
app.waitForState(mapTask1,TaskState.SUCCEEDED);
long task1StartTime=mapTask1.getReport().getStartTime();
long task1FinishTime=mapTask1.getReport().getFinishTime();
// Simulate an AM crash.
app.stop();
long am2StartTimeEst=System.currentTimeMillis();
// Generation 2: recovery enabled.
app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
reduceTask=it.next();
// The first map is recovered; the second map and the reduce are driven
// to completion in this generation.
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.RUNNING);
task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.waitForState(reduceTask,TaskState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceTask.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
// Timestamps recorded in generation 1 must survive recovery unchanged.
Assert.assertEquals("Job Start time not correct",jobStartTime,job.getReport().getStartTime());
Assert.assertEquals("Task Start time not correct",task1StartTime,mapTask1.getReport().getStartTime());
Assert.assertEquals("Task Finish time not correct",task1FinishTime,mapTask1.getReport().getFinishTime());
// Both AM generations must be reported, in attempt order, with
// consistent container/node information.
Assert.assertEquals(2,job.getAMInfos().size());
int attemptNum=1;
for ( AMInfo amInfo : job.getAMInfos()) {
Assert.assertEquals(attemptNum++,amInfo.getAppAttemptId().getAttemptId());
Assert.assertEquals(amInfo.getAppAttemptId(),amInfo.getContainerId().getApplicationAttemptId());
Assert.assertEquals(MRApp.NM_HOST,amInfo.getNodeManagerHost());
Assert.assertEquals(MRApp.NM_PORT,amInfo.getNodeManagerPort());
Assert.assertEquals(MRApp.NM_HTTP_PORT,amInfo.getNodeManagerHttpPort());
}
// Each AM's recorded start time must fall inside the window observed
// by this test.
long am1StartTimeReal=job.getAMInfos().get(0).getStartTime();
long am2StartTimeReal=job.getAMInfos().get(1).getStartTime();
Assert.assertTrue(am1StartTimeReal >= am1StartTimeEst && am1StartTimeReal <= am2StartTimeEst);
Assert.assertTrue(am2StartTimeReal >= am2StartTimeEst && am2StartTimeReal <= System.currentTimeMillis());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Same recovery scenario as testOutputRecovery but with the old
 * (mapred.*) committer APIs: new-api flags are set to false. The map
 * and first reduce completed in generation 1 must be recovered
 * (shuffle port 5467 preserved) and only the second reduce runs in
 * generation 2.
 */
@Test public void testRecoveryWithOldCommiter() throws Exception {
int runCount=0;
// Generation 1: 1 map, 2 reduces, old-API committer.
MRApp app=new MRAppWithHistory(1,2,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean("mapred.mapper.new-api",false);
conf.setBoolean("mapred.reducer.new-api",false);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task reduceTask1=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
TaskAttempt task1Attempt1=mapTask1.getAttempts().values().iterator().next();
app.waitForState(task1Attempt1,TaskAttemptState.RUNNING);
// Complete the map and the first reduce (which writes output), then
// crash the AM.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
Assert.assertEquals(5467,task1Attempt1.getShufflePort());
app.waitForState(reduceTask1,TaskState.RUNNING);
TaskAttempt reduce1Attempt1=reduceTask1.getAttempts().values().iterator().next();
writeOutput(reduce1Attempt1,conf);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduce1Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(reduceTask1,TaskState.SUCCEEDED);
app.stop();
// Generation 2: recovery enabled, still old-API.
app=new MRAppWithHistory(1,2,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",false);
conf.setBoolean("mapred.reducer.new-api",false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
reduceTask1=it.next();
Task reduceTask2=it.next();
// Map and first reduce recovered; shuffle port preserved.
app.waitForState(mapTask1,TaskState.SUCCEEDED);
task1Attempt1=mapTask1.getAttempts().values().iterator().next();
Assert.assertEquals(5467,task1Attempt1.getShufflePort());
app.waitForState(reduceTask1,TaskState.SUCCEEDED);
// Only the second reduce runs in this generation.
app.waitForState(reduceTask2,TaskState.RUNNING);
TaskAttempt reduce2Attempt=reduceTask2.getAttempts().values().iterator().next();
app.waitForState(reduce2Attempt,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduce2Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(reduceTask2,TaskState.SUCCEEDED);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
validateOutput();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Recovery across two successive AM crashes: generation 1 completes the
 * first map and crashes; generation 2 recovers it, completes the second
 * map, and crashes; generation 3 recovers both maps and finishes the
 * reduce, after which the job succeeds.
 */
@Test public void testMultipleCrashes() throws Exception {
int runCount=0;
// Generation 1: 2 maps, 1 reduce, history enabled.
MRApp app=new MRAppWithHistory(2,1,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
Task reduceTask=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
TaskAttempt task1Attempt1=mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task1Attempt1,TaskAttemptState.RUNNING);
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
Assert.assertEquals("Reduce Task state not correct",TaskState.RUNNING,reduceTask.getReport().getTaskState());
// Complete the first map, then crash the AM (first crash).
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.stop();
// Generation 2: recovers map 1, completes map 2, then crashes again.
app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
reduceTask=it.next();
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.RUNNING);
task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask2,TaskState.SUCCEEDED);
// Second crash.
app.stop();
// Generation 3: both maps recovered; only the reduce remains.
app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
reduceTask=it.next();
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.waitForState(reduceTask,TaskState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceTask.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * AM with 2 maps and 1 reduce. For 1st map, one attempt fails, one attempt
 * completely disappears because of failed launch, one attempt gets killed and
 * one attempt succeeds. AM crashes after the first tasks finishes and
 * recovers completely and succeeds in the second generation.
 * @throws Exception
 */
@Test public void testCrashed() throws Exception {
int runCount=0;
// Rough lower bound for the first AM's start time, re-checked at the end.
long am1StartTimeEst=System.currentTimeMillis();
// Generation 1 of the AM (run count 1).
MRApp app=new MRAppWithHistory(2,1,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
// Recorded now so recovery can be checked to preserve it verbatim.
long jobStartTime=job.getReport().getStartTime();
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
Task reduceTask=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
TaskAttempt task1Attempt1=mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task1Attempt1,TaskAttemptState.RUNNING);
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
Assert.assertEquals("Reduce Task state not correct",TaskState.RUNNING,reduceTask.getReport().getTaskState());
// Attempt 1 of map 1: fail it, then poll until a replacement attempt appears.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(),TaskAttemptEventType.TA_FAILMSG));
app.waitForState(task1Attempt1,TaskAttemptState.FAILED);
int timeOut=0;
while (mapTask1.getAttempts().size() != 2 && timeOut++ < 10) {
Thread.sleep(2000);
LOG.info("Waiting for next attempt to start");
}
Assert.assertEquals(2,mapTask1.getAttempts().size());
Iterator itr=mapTask1.getAttempts().values().iterator();
itr.next();
TaskAttempt task1Attempt2=itr.next();
// Attempt 2 of map 1: simulate a container launch failure.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt2.getID(),TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
app.waitForState(task1Attempt2,TaskAttemptState.FAILED);
timeOut=0;
while (mapTask1.getAttempts().size() != 3 && timeOut++ < 10) {
Thread.sleep(2000);
LOG.info("Waiting for next attempt to start");
}
Assert.assertEquals(3,mapTask1.getAttempts().size());
itr=mapTask1.getAttempts().values().iterator();
itr.next();
itr.next();
TaskAttempt task1Attempt3=itr.next();
app.waitForState(task1Attempt3,TaskAttemptState.RUNNING);
// Attempt 3 of map 1: kill it.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt3.getID(),TaskAttemptEventType.TA_KILL));
app.waitForState(task1Attempt3,TaskAttemptState.KILLED);
timeOut=0;
while (mapTask1.getAttempts().size() != 4 && timeOut++ < 10) {
Thread.sleep(2000);
LOG.info("Waiting for next attempt to start");
}
Assert.assertEquals(4,mapTask1.getAttempts().size());
itr=mapTask1.getAttempts().values().iterator();
itr.next();
itr.next();
itr.next();
TaskAttempt task1Attempt4=itr.next();
app.waitForState(task1Attempt4,TaskAttemptState.RUNNING);
// Attempt 4 of map 1: let it succeed, completing the first map task.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt4.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
// Record map 1's reported times; recovery must preserve them exactly.
long task1StartTime=mapTask1.getReport().getStartTime();
long task1FinishTime=mapTask1.getReport().getFinishTime();
// Crash the first AM generation after map 1 has finished.
app.stop();
long am2StartTimeEst=System.currentTimeMillis();
// Generation 2: restart with recovery enabled; map 1 must come back SUCCEEDED.
app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
reduceTask=it.next();
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.RUNNING);
task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
// Finish map 2 and the reduce in the recovered generation.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.waitForState(reduceTask,TaskState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceTask.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
// Recovered job/task timestamps must match the pre-crash generation.
Assert.assertEquals("Job Start time not correct",jobStartTime,job.getReport().getStartTime());
Assert.assertEquals("Task Start time not correct",task1StartTime,mapTask1.getReport().getStartTime());
Assert.assertEquals("Task Finish time not correct",task1FinishTime,mapTask1.getReport().getFinishTime());
// Both AM generations must be recorded, in attempt order, with the MRApp
// node-manager endpoints.
Assert.assertEquals(2,job.getAMInfos().size());
int attemptNum=1;
for ( AMInfo amInfo : job.getAMInfos()) {
Assert.assertEquals(attemptNum++,amInfo.getAppAttemptId().getAttemptId());
Assert.assertEquals(amInfo.getAppAttemptId(),amInfo.getContainerId().getApplicationAttemptId());
Assert.assertEquals(MRApp.NM_HOST,amInfo.getNodeManagerHost());
Assert.assertEquals(MRApp.NM_PORT,amInfo.getNodeManagerPort());
Assert.assertEquals(MRApp.NM_HTTP_PORT,amInfo.getNodeManagerHttpPort());
}
// Each AM's reported start time must fall inside the window measured above.
long am1StartTimeReal=job.getAMInfos().get(0).getStartTime();
long am2StartTimeReal=job.getAMInfos().get(1).getStartTime();
Assert.assertTrue(am1StartTimeReal >= am1StartTimeEst && am1StartTimeReal <= am2StartTimeEst);
Assert.assertTrue(am2StartTimeReal >= am2StartTimeEst && am2StartTimeReal <= System.currentTimeMillis());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testDeletionofStaging() throws IOException {
// Normal shutdown on the AM's last retry: the staging directory must be
// deleted, verified via a mocked FileSystem (no real disk access).
conf.set(MRJobConfig.MAPREDUCE_JOB_DIR,stagingJobDir);
fs=mock(FileSystem.class);
when(fs.delete(any(Path.class),anyBoolean())).thenReturn(true);
String user=UserGroupInformation.getCurrentUser().getShortUserName();
Path stagingDir=MRApps.getStagingAreaDir(conf,user);
when(fs.exists(stagingDir)).thenReturn(true);
ApplicationId appId=ApplicationId.newInstance(System.currentTimeMillis(),0);
ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
JobId jobid=recordFactory.newRecordInstance(JobId.class);
jobid.setAppId(appId);
ContainerAllocator mockAlloc=mock(ContainerAllocator.class);
// Sanity-check the precondition the scenario depends on: more than one
// AM attempt is allowed, yet a RUNNING job shutting down still counts as
// the last retry (asserted below).
Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
MRAppMaster appMaster=new TestMRApp(attemptId,mockAlloc,JobStateInternal.RUNNING,MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
appMaster.init(conf);
appMaster.start();
appMaster.shutDownJob();
Assert.assertEquals(true,((TestMRApp)appMaster).getTestIsLastAMRetry());
// Exactly one delete of the staging path is expected.
verify(fs).delete(stagingJobPath,true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=30000) public void testNoDeletionofStagingOnReboot() throws IOException {
// Shutdown in REBOOT state with retries remaining: the staging directory
// must NOT be deleted (the next AM attempt still needs it).
conf.set(MRJobConfig.MAPREDUCE_JOB_DIR,stagingJobDir);
// Mocked FileSystem so the absence of a delete call can be verified.
fs=mock(FileSystem.class);
when(fs.delete(any(Path.class),anyBoolean())).thenReturn(true);
String user=UserGroupInformation.getCurrentUser().getShortUserName();
Path stagingDir=MRApps.getStagingAreaDir(conf,user);
when(fs.exists(stagingDir)).thenReturn(true);
ApplicationId appId=ApplicationId.newInstance(System.currentTimeMillis(),0);
ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
ContainerAllocator mockAlloc=mock(ContainerAllocator.class);
// Attempt 1 of >1 max attempts: a reboot here is not the last AM retry.
Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
MRAppMaster appMaster=new TestMRApp(attemptId,mockAlloc,JobStateInternal.REBOOT,MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
appMaster.init(conf);
appMaster.start();
appMaster.shutDownJob();
Assert.assertEquals(false,((TestMRApp)appMaster).getTestIsLastAMRetry());
// No delete call at all is allowed in this scenario.
verify(fs,times(0)).delete(stagingJobPath,true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
@Test public void testBasic() throws Exception {
// Successful commit path: a CommitterJobCommitEvent should run the
// committer, leave the start/success marker files behind (but not the
// failure marker), and emit a JobCommitCompletedEvent.
AppContext mockContext=mock(AppContext.class);
OutputCommitter mockCommitter=mock(OutputCommitter.class);
Clock mockClock=mock(Clock.class);
CommitterEventHandler handler=new CommitterEventHandler(mockContext,mockCommitter,new TestingRMHeartbeatHandler());
YarnConfiguration conf=new YarnConfiguration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir);
JobContext mockJobContext=mock(JobContext.class);
ApplicationAttemptId attemptid=ConverterUtils.toApplicationAttemptId("appattempt_1234567890000_0001_0");
JobId jobId=TypeConverter.toYarn(TypeConverter.fromYarn(attemptid.getApplicationId()));
// Captures the event the handler emits so the test can inspect it.
WaitForItHandler waitForItHandler=new WaitForItHandler();
when(mockContext.getApplicationID()).thenReturn(attemptid.getApplicationId());
when(mockContext.getApplicationAttemptId()).thenReturn(attemptid);
when(mockContext.getEventHandler()).thenReturn(waitForItHandler);
when(mockContext.getClock()).thenReturn(mockClock);
handler.init(conf);
handler.start();
try {
handler.handle(new CommitterJobCommitEvent(jobId,mockJobContext));
String user=UserGroupInformation.getCurrentUser().getShortUserName();
Path startCommitFile=MRApps.getStartJobCommitFile(conf,user,jobId);
Path endCommitSuccessFile=MRApps.getEndJobCommitSuccessFile(conf,user,jobId);
Path endCommitFailureFile=MRApps.getEndJobCommitFailureFile(conf,user,jobId);
Event e=waitForItHandler.getAndClearEvent();
assertNotNull(e);
assertTrue(e instanceof JobCommitCompletedEvent);
FileSystem fs=FileSystem.get(conf);
// Start and success markers must exist; the failure marker must not.
assertTrue(startCommitFile.toString(),fs.exists(startCommitFile));
assertTrue(endCommitSuccessFile.toString(),fs.exists(endCommitSuccessFile));
assertFalse(endCommitFailureFile.toString(),fs.exists(endCommitFailureFile));
verify(mockCommitter).commitJob(any(JobContext.class));
}
finally {
// Always stop the handler so its worker threads do not leak.
handler.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
@Test public void testFailure() throws Exception {
// Failing commit path: commitJob() throws, so the handler must emit a
// JobCommitFailedEvent and leave the failure marker file behind.
AppContext mockContext=mock(AppContext.class);
OutputCommitter mockCommitter=mock(OutputCommitter.class);
Clock mockClock=mock(Clock.class);
CommitterEventHandler handler=new CommitterEventHandler(mockContext,mockCommitter,new TestingRMHeartbeatHandler());
YarnConfiguration conf=new YarnConfiguration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir);
JobContext mockJobContext=mock(JobContext.class);
ApplicationAttemptId attemptid=ConverterUtils.toApplicationAttemptId("appattempt_1234567890000_0001_0");
JobId jobId=TypeConverter.toYarn(TypeConverter.fromYarn(attemptid.getApplicationId()));
// Captures the event the handler emits so the test can inspect it.
WaitForItHandler waitForItHandler=new WaitForItHandler();
when(mockContext.getApplicationID()).thenReturn(attemptid.getApplicationId());
when(mockContext.getApplicationAttemptId()).thenReturn(attemptid);
when(mockContext.getEventHandler()).thenReturn(waitForItHandler);
when(mockContext.getClock()).thenReturn(mockClock);
// Force the commit itself to fail.
doThrow(new YarnRuntimeException("Intentional Failure")).when(mockCommitter).commitJob(any(JobContext.class));
handler.init(conf);
handler.start();
try {
handler.handle(new CommitterJobCommitEvent(jobId,mockJobContext));
String user=UserGroupInformation.getCurrentUser().getShortUserName();
Path startCommitFile=MRApps.getStartJobCommitFile(conf,user,jobId);
Path endCommitSuccessFile=MRApps.getEndJobCommitSuccessFile(conf,user,jobId);
Path endCommitFailureFile=MRApps.getEndJobCommitFailureFile(conf,user,jobId);
Event e=waitForItHandler.getAndClearEvent();
assertNotNull(e);
assertTrue(e instanceof JobCommitFailedEvent);
FileSystem fs=FileSystem.get(conf);
// Start and failure markers must exist; the success marker must not.
assertTrue(fs.exists(startCommitFile));
assertFalse(fs.exists(endCommitSuccessFile));
assertTrue(fs.exists(endCommitFailureFile));
verify(mockCommitter).commitJob(any(JobContext.class));
}
finally {
// Always stop the handler so its worker threads do not leak.
handler.stop();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test(timeout=20000) public void testRebootedDuringCommit() throws Exception {
// JOB_AM_REBOOT arriving while the job is COMMITTING, on the last AM retry,
// must move the job to internal REBOOT; once the AM has unregistered, the
// externally visible state becomes ERROR.
Configuration conf=new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir);
conf.setInt(MRJobConfig.MR_AM_MAX_ATTEMPTS,2);
AsyncDispatcher dispatcher=new AsyncDispatcher();
dispatcher.init(conf);
dispatcher.start();
// Two-party barrier: the WaitingOutputCommitter presumably holds the other
// side, keeping the job in COMMITTING until the test is ready.
CyclicBarrier syncBarrier=new CyclicBarrier(2);
OutputCommitter committer=new WaitingOutputCommitter(syncBarrier,true);
CommitterEventHandler commitHandler=createCommitterEventHandler(dispatcher,committer);
commitHandler.init(conf);
commitHandler.start();
AppContext mockContext=mock(AppContext.class);
when(mockContext.isLastAMRetry()).thenReturn(true);
when(mockContext.hasSuccessfullyUnregistered()).thenReturn(false);
JobImpl job=createRunningStubbedJob(conf,dispatcher,2,mockContext);
completeJobTasks(job);
assertJobState(job,JobStateInternal.COMMITTING);
// Meet the committer at the barrier, then reboot mid-commit.
syncBarrier.await();
job.handle(new JobEvent(job.getID(),JobEventType.JOB_AM_REBOOT));
assertJobState(job,JobStateInternal.REBOOT);
// Externally still RUNNING until the AM has unregistered from the RM.
Assert.assertEquals(JobState.RUNNING,job.getState());
when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
Assert.assertEquals(JobState.ERROR,job.getState());
dispatcher.stop();
commitHandler.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test(timeout=20000) public void testRebootedDuringSetup() throws Exception {
// JOB_AM_REBOOT during SETUP (not the last AM retry): the job goes to
// internal REBOOT but externally still reports RUNNING.
Configuration conf=new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir);
AsyncDispatcher dispatcher=new AsyncDispatcher();
dispatcher.init(conf);
dispatcher.start();
// Committer whose setupJob() blocks until its thread is interrupted,
// pinning the job in the SETUP state while the reboot event is delivered.
OutputCommitter committer=new StubbedOutputCommitter(){
@Override public synchronized void setupJob( JobContext jobContext) throws IOException {
while (!Thread.interrupted()) {
try {
wait();
}
catch ( InterruptedException e) {
// Loop re-checks the interrupt flag; nothing else to do here.
}
}
}
}
;
CommitterEventHandler commitHandler=createCommitterEventHandler(dispatcher,committer);
commitHandler.init(conf);
commitHandler.start();
AppContext mockContext=mock(AppContext.class);
when(mockContext.isLastAMRetry()).thenReturn(false);
JobImpl job=createStubbedJob(conf,dispatcher,2,mockContext);
JobId jobId=job.getID();
job.handle(new JobEvent(jobId,JobEventType.JOB_INIT));
assertJobState(job,JobStateInternal.INITED);
job.handle(new JobStartEvent(jobId));
assertJobState(job,JobStateInternal.SETUP);
job.handle(new JobEvent(job.getID(),JobEventType.JOB_AM_REBOOT));
assertJobState(job,JobStateInternal.REBOOT);
// Not the last retry, so clients still see the job as RUNNING.
Assert.assertEquals(JobState.RUNNING,job.getState());
dispatcher.stop();
commitHandler.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testMetaInfoSizeOverMax() throws Exception {
// InitTransition.createSplits() throwing must leave the job in NEW and
// record the exception in the job diagnostics.
Configuration conf=new Configuration();
JobID jobID=JobID.forName("job_1234567890000_0001");
JobId jobId=TypeConverter.toYarn(jobID);
MRAppMetrics mrAppMetrics=MRAppMetrics.create();
JobImpl job=new JobImpl(jobId,ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,0),0),conf,mock(EventHandler.class),null,new JobTokenSecretManager(),new Credentials(),null,null,mrAppMetrics,null,true,null,0,null,null,null,null);
// Split creation always fails with the canned message.
InitTransition initTransition=new InitTransition(){
@Override protected TaskSplitMetaInfo[] createSplits( JobImpl job, JobId jobId){
throw new YarnRuntimeException(EXCEPTIONMSG);
}
};
JobEvent mockJobEvent=mock(JobEvent.class);
JobStateInternal jobSI=initTransition.transition(job,mockJobEvent);
// assertEquals reports expected vs. actual on failure, unlike the old
// assertTrue(jobSI.equals(...)) which only printed the message.
Assert.assertEquals("When init fails, return value from InitTransition.transition should equal NEW.",JobStateInternal.NEW,jobSI);
Assert.assertTrue("Job diagnostics should contain YarnRuntimeException",job.getDiagnostics().toString().contains("YarnRuntimeException"));
Assert.assertTrue("Job diagnostics should contain " + EXCEPTIONMSG,job.getDiagnostics().toString().contains(EXCEPTIONMSG));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testTransitionsAtFailed() throws IOException {
// Force the job into FAILED by making setupJob() throw, then confirm the
// FAILED state absorbs stale task/attempt events without leaving FAILED.
Configuration conf=new Configuration();
AsyncDispatcher dispatcher=new AsyncDispatcher();
dispatcher.init(conf);
dispatcher.start();
OutputCommitter committer=mock(OutputCommitter.class);
doThrow(new IOException("forcefail")).when(committer).setupJob(any(JobContext.class));
CommitterEventHandler commitHandler=createCommitterEventHandler(dispatcher,committer);
commitHandler.init(conf);
commitHandler.start();
AppContext mockContext=mock(AppContext.class);
when(mockContext.hasSuccessfullyUnregistered()).thenReturn(false);
JobImpl job=createStubbedJob(conf,dispatcher,2,mockContext);
JobId jobId=job.getID();
job.handle(new JobEvent(jobId,JobEventType.JOB_INIT));
assertJobState(job,JobStateInternal.INITED);
// Starting the job runs the failing setupJob() and lands it in FAILED.
job.handle(new JobStartEvent(jobId));
assertJobState(job,JobStateInternal.FAILED);
// None of these late events may move the job out of FAILED.
JobEventType[] staleEvents={JobEventType.JOB_TASK_COMPLETED,JobEventType.JOB_TASK_ATTEMPT_COMPLETED,JobEventType.JOB_MAP_TASK_RESCHEDULED,JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE};
for (JobEventType eventType : staleEvents) {
job.handle(new JobEvent(jobId,eventType));
assertJobState(job,JobStateInternal.FAILED);
}
// Externally the job still reports RUNNING until the AM unregisters.
Assert.assertEquals(JobState.RUNNING,job.getState());
when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
Assert.assertEquals(JobState.FAILED,job.getState());
dispatcher.stop();
commitHandler.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testUberDecision() throws Exception {
// Exercise the uber-task decision helper across several configurations;
// each scenario gets its own Configuration instead of reusing one variable.
// Default configuration: not uber.
Configuration defaults=new Configuration();
Assert.assertFalse(testUberDecision(defaults));
// Uber flag enabled, everything else default: uber.
Configuration enabled=new Configuration();
enabled.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,true);
Assert.assertTrue(testUberDecision(enabled));
// Enabled, but one reduce against a max of zero reduces: not uber.
Configuration zeroMaxReduces=new Configuration();
zeroMaxReduces.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,true);
zeroMaxReduces.setInt(MRJobConfig.JOB_UBERTASK_MAXREDUCES,0);
zeroMaxReduces.setInt(MRJobConfig.NUM_REDUCES,1);
Assert.assertFalse(testUberDecision(zeroMaxReduces));
// Enabled, one reduce within a max of one: uber.
Configuration oneMaxReduce=new Configuration();
oneMaxReduce.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,true);
oneMaxReduce.setInt(MRJobConfig.JOB_UBERTASK_MAXREDUCES,1);
oneMaxReduce.setInt(MRJobConfig.NUM_REDUCES,1);
Assert.assertTrue(testUberDecision(oneMaxReduce));
// Enabled, but map count capped at one: not uber.
Configuration oneMaxMap=new Configuration();
oneMaxMap.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,true);
oneMaxMap.setInt(MRJobConfig.JOB_UBERTASK_MAXMAPS,1);
Assert.assertFalse(testUberDecision(oneMaxMap));
// Enabled with zero reduces: large reduce resource requests are ignored.
Configuration mapOnly=new Configuration();
mapOnly.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,true);
mapOnly.setInt(MRJobConfig.NUM_REDUCES,0);
mapOnly.setInt(MRJobConfig.REDUCE_MEMORY_MB,2048);
mapOnly.setInt(MRJobConfig.REDUCE_CPU_VCORES,10);
Assert.assertTrue(testUberDecision(mapOnly));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
@Test public void testReportDiagnostics() throws Exception {
// A diagnostics update must surface in the job report both on a freshly
// created job and on one that has already been asked to die.
final String diagMsg="some diagnostic message";
JobId jobId=TypeConverter.toYarn(JobID.forName("job_1234567890000_0001"));
final JobDiagnosticsUpdateEvent diagUpdateEvent=new JobDiagnosticsUpdateEvent(jobId,diagMsg);
MRAppMetrics metrics=MRAppMetrics.create();
AppContext context=mock(AppContext.class);
when(context.hasSuccessfullyUnregistered()).thenReturn(true);
// Case 1: diagnostics delivered to a freshly created job.
JobImpl job=new JobImpl(jobId,Records.newRecord(ApplicationAttemptId.class),new Configuration(),mock(EventHandler.class),null,mock(JobTokenSecretManager.class),null,new SystemClock(),null,metrics,null,true,null,0,null,context,null,null);
job.handle(diagUpdateEvent);
String reported=job.getReport().getDiagnostics();
Assert.assertNotNull(reported);
Assert.assertTrue(reported.contains(diagMsg));
// Case 2: diagnostics delivered after a kill request.
job=new JobImpl(jobId,Records.newRecord(ApplicationAttemptId.class),new Configuration(),mock(EventHandler.class),null,mock(JobTokenSecretManager.class),null,new SystemClock(),null,metrics,null,true,null,0,null,context,null,null);
job.handle(new JobEvent(jobId,JobEventType.JOB_KILL));
job.handle(diagUpdateEvent);
reported=job.getReport().getDiagnostics();
Assert.assertNotNull(reported);
Assert.assertTrue(reported.contains(diagMsg));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testCheckAccess(){
// The job owner must always be able to view; whether a second, unrelated
// user may view depends on the ACL configuration (table below).
String owner=System.getProperty("user.name");
String other=owner + "1234";
UserGroupInformation ownerUgi=UserGroupInformation.createRemoteUser(owner);
UserGroupInformation otherUgi=UserGroupInformation.createRemoteUser(other);
JobId jobId=TypeConverter.toYarn(JobID.forName("job_1234567890000_0001"));
// Each row: {aclsEnabled, viewAclValue, otherUserMayView}
Object[][] aclCases={
{true,"",false},     // ACLs on, empty view ACL: only the owner
{true,other,true},   // ACLs on, other user explicitly listed
{true,"*",true},     // ACLs on, wildcard admits everyone
{false,"",true}      // ACLs off: everyone may view
};
for (Object[] aclCase : aclCases) {
Configuration conf=new Configuration();
conf.setBoolean(MRConfig.MR_ACLS_ENABLED,(Boolean)aclCase[0]);
conf.set(MRJobConfig.JOB_ACL_VIEW_JOB,(String)aclCase[1]);
JobImpl job=new JobImpl(jobId,null,conf,null,null,null,null,null,null,null,null,true,owner,0,null,null,null,null);
Assert.assertTrue(job.checkAccess(ownerUgi,JobACL.VIEW_JOB));
Assert.assertEquals(aclCase[2],job.checkAccess(otherUgi,JobACL.VIEW_JOB));
}
// A null operation is always permitted, even with ACLs on and an empty ACL.
Configuration conf=new Configuration();
conf.setBoolean(MRConfig.MR_ACLS_ENABLED,true);
conf.set(MRJobConfig.JOB_ACL_VIEW_JOB,"");
JobImpl job=new JobImpl(jobId,null,conf,null,null,null,null,null,null,null,null,true,owner,0,null,null,null,null);
Assert.assertTrue(job.checkAccess(ownerUgi,null));
Assert.assertTrue(job.checkAccess(otherUgi,null));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
@Test public void testShuffleProviders() throws Exception {
// Two auxiliary shuffle services are registered via
// MAPREDUCE_JOB_SHUFFLE_PROVIDER_SERVICES; both must appear in the
// container launch context's service-data map.
ApplicationId appId=ApplicationId.newInstance(1,1);
JobId jobId=MRBuilderUtils.newJobId(appId,1);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP);
Path jobFile=mock(Path.class);
EventHandler eventHandler=mock(EventHandler.class);
TaskAttemptListener taListener=mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0));
JobConf jobConf=new JobConf();
jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache",true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,"");
// Declare both test shuffle handlers as NM auxiliary services.
jobConf.set(YarnConfiguration.NM_AUX_SERVICES,TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID + "," + TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID);
String serviceName=TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID;
String serviceStr=String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,serviceName);
jobConf.set(serviceStr,TestShuffleHandler1.class.getName());
serviceName=TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID;
serviceStr=String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,serviceName);
jobConf.set(serviceStr,TestShuffleHandler2.class.getName());
jobConf.set(MRJobConfig.MAPREDUCE_JOB_SHUFFLE_PROVIDER_SERVICES,TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID + "," + TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID);
Credentials credentials=new Credentials();
Token jobToken=new Token(("tokenid").getBytes(),("tokenpw").getBytes(),new Text("tokenkind"),new Text("tokenservice"));
TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,mock(TaskSplitMetaInfo.class),jobConf,taListener,jobToken,credentials,new SystemClock(),null);
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,taImpl.getID().toString());
ContainerLaunchContext launchCtx=TaskAttemptImpl.createContainerLaunchContext(null,jobConf,jobToken,taImpl.createRemoteTask(),TypeConverter.fromYarn(jobId),mock(WrappedJvmID.class),taListener,credentials);
Map serviceDataMap=launchCtx.getServiceData();
Assert.assertNotNull("TestShuffleHandler1 is missing",serviceDataMap.get(TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID));
Assert.assertNotNull("TestShuffleHandler2 is missing",serviceDataMap.get(TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID));
// assertEquals reports expected vs. actual size on failure, unlike the
// old assertTrue(size() == 3). Three entries expected: the two test
// handlers plus one more -- presumably the default shuffle; confirm.
Assert.assertEquals("mismatch number of services in map",3,serviceDataMap.size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testFetchFailureAttemptFinishTime() throws Exception {
// A succeeded map attempt that later fails with TA_TOO_MANY_FETCH_FAILURE
// must keep its original finish time rather than restamping it.
ApplicationId appId=ApplicationId.newInstance(1,2);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0);
JobId jobId=MRBuilderUtils.newJobId(appId,1);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP);
TaskAttemptId attemptId=MRBuilderUtils.newTaskAttemptId(taskId,0);
Path jobFile=mock(Path.class);
MockEventHandler eventHandler=new MockEventHandler();
TaskAttemptListener taListener=mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0));
JobConf jobConf=new JobConf();
jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache",true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,"");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,"10");
TaskSplitMetaInfo splits=mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
AppContext appCtx=mock(AppContext.class);
ClusterInfo clusterInfo=mock(ClusterInfo.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,splits,jobConf,taListener,mock(Token.class),new Credentials(),new SystemClock(),appCtx);
NodeId nid=NodeId.newInstance("127.0.0.1",0);
ContainerId contId=ContainerId.newInstance(appAttemptId,3);
Container container=mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
// Drive the attempt: schedule -> assign -> launch -> done -> cleaned.
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,container,mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId,0));
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_DONE));
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_CONTAINER_CLEANED));
// JUnit's assertEquals takes (message, expected, actual); the previous
// argument order passed the actual value as "expected", producing
// misleading failure messages.
assertEquals("Task attempt is not in succeeded state",TaskAttemptState.SUCCEEDED,taImpl.getState());
assertTrue("Task Attempt finish time is not greater than 0",taImpl.getFinishTime() > 0);
Long finishTime=taImpl.getFinishTime();
// Let wall-clock time advance so a restamped finish time would differ.
Thread.sleep(5);
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
assertEquals("Task attempt is not in Too Many Fetch Failure state",TaskAttemptState.FAILED,taImpl.getState());
assertEquals("After TA_TOO_MANY_FETCH_FAILURE," + " Task attempt finish time is not the same ",finishTime,Long.valueOf(taImpl.getFinishTime()));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testDoubleTooManyFetchFailure() throws Exception {
// A second TA_TOO_MANY_FETCH_FAILURE on an already-failed attempt must be
// absorbed without triggering an internal error in the state machine.
ApplicationId appId=ApplicationId.newInstance(1,2);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0);
JobId jobId=MRBuilderUtils.newJobId(appId,1);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP);
TaskAttemptId attemptId=MRBuilderUtils.newTaskAttemptId(taskId,0);
Path jobFile=mock(Path.class);
MockEventHandler eventHandler=new MockEventHandler();
TaskAttemptListener taListener=mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0));
JobConf jobConf=new JobConf();
jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache",true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,"");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,"10");
TaskSplitMetaInfo splits=mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
AppContext appCtx=mock(AppContext.class);
ClusterInfo clusterInfo=mock(ClusterInfo.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
// (An unused Resource mock and its stubbing were removed; it was never
// wired into anything.)
TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,splits,jobConf,taListener,new Token(),new Credentials(),new SystemClock(),appCtx);
NodeId nid=NodeId.newInstance("127.0.0.1",0);
ContainerId contId=ContainerId.newInstance(appAttemptId,3);
Container container=mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
// Drive the attempt to SUCCEEDED first.
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,container,mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId,0));
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_DONE));
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_CONTAINER_CLEANED));
// assertEquals takes (message, expected, actual); the previous argument
// order passed them reversed, producing misleading failure messages.
assertEquals("Task attempt is not in succeeded state",TaskAttemptState.SUCCEEDED,taImpl.getState());
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
assertEquals("Task attempt is not in FAILED state",TaskAttemptState.FAILED,taImpl.getState());
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
assertEquals("Task attempt is not in FAILED state, still",TaskAttemptState.FAILED,taImpl.getState());
assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",eventHandler.internalError);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testLaunchFailedWhileKilling() throws Exception {
// Kill an attempt before its container launches, then deliver a late
// launch-failure event: the state machine must absorb the sequence
// without raising an internal error.
ApplicationId appId=ApplicationId.newInstance(1,2);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0);
JobId jobId=MRBuilderUtils.newJobId(appId,1);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP);
TaskAttemptId attemptId=MRBuilderUtils.newTaskAttemptId(taskId,0);
Path jobFile=mock(Path.class);
MockEventHandler eventHandler=new MockEventHandler();
TaskAttemptListener listener=mock(TaskAttemptListener.class);
when(listener.getAddress()).thenReturn(new InetSocketAddress("localhost",0));
JobConf conf=new JobConf();
conf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class);
conf.setBoolean("fs.file.impl.disable.cache",true);
conf.set(JobConf.MAPRED_MAP_TASK_ENV,"");
conf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,"10");
TaskSplitMetaInfo splitInfo=mock(TaskSplitMetaInfo.class);
when(splitInfo.getLocations()).thenReturn(new String[]{"127.0.0.1"});
TaskAttemptImpl attempt=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,splitInfo,conf,listener,new Token(),new Credentials(),new SystemClock(),null);
NodeId nodeId=NodeId.newInstance("127.0.0.1",0);
Container container=mock(Container.class);
when(container.getId()).thenReturn(ContainerId.newInstance(appAttemptId,3));
when(container.getNodeId()).thenReturn(nodeId);
// Schedule and assign, kill before launch, then deliver the late
// container-cleaned and launch-failed events.
attempt.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_SCHEDULE));
attempt.handle(new TaskAttemptContainerAssignedEvent(attemptId,container,mock(Map.class)));
attempt.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_KILL));
attempt.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_CONTAINER_CLEANED));
attempt.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
assertFalse(eventHandler.internalError);
assertEquals("Task attempt is not assigned on the local node",Locality.NODE_LOCAL,attempt.getLocality());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test
public void testTooManyFetchFailureAfterKill() throws Exception {
  // Drives a map attempt to SUCCEEDED, kills it, then delivers a stale
  // TA_TOO_MANY_FETCH_FAILURE; the attempt must stay KILLED with no
  // internal error. Fixed: assertEquals arguments were in
  // (message, actual, expected) order, which produces misleading failure
  // messages — JUnit expects (message, expected, actual).
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  // Records whether any transition raised an INTERNAL_ERROR event.
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, mock(Token.class),
      new Credentials(), new SystemClock(), appCtx);
  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  // Run the attempt through a normal successful lifecycle.
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_DONE));
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_CLEANED));
  assertEquals("Task attempt is not in succeeded state",
      TaskAttemptState.SUCCEEDED, taImpl.getState());
  // Kill the already-succeeded attempt.
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL));
  assertEquals("Task attempt is not in KILLED state",
      TaskAttemptState.KILLED, taImpl.getState());
  // A stale fetch-failure report must be ignored once the attempt is killed.
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
  assertEquals("Task attempt is not in KILLED state, still",
      TaskAttemptState.KILLED, taImpl.getState());
  assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
      eventHandler.internalError);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test
public void testContainerCleanedWhileCommitting() throws Exception {
  // Cleaning the container while the attempt is in COMMIT_PENDING must not
  // raise an internal error; with no split locations the attempt should be
  // OFF_SWITCH. Fixed: assertEquals arguments were in (message, actual,
  // expected) order — JUnit expects (message, expected, actual).
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  // No data-local hosts: locality should resolve to OFF_SWITCH.
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] {});
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(),
      new Credentials(), new SystemClock(), appCtx);
  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_COMMIT_PENDING));
  assertEquals("Task attempt is not in commit pending state",
      TaskAttemptState.COMMIT_PENDING, taImpl.getState());
  // Container cleanup arriving mid-commit must be handled gracefully.
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_CLEANED));
  assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
      eventHandler.internalError);
  assertEquals("Task attempt is assigned locally",
      Locality.OFF_SWITCH, taImpl.getLocality());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test
public void testContainerKillWhileCommitPending() throws Exception {
  // Killing an attempt in COMMIT_PENDING must move it to
  // KILL_CONTAINER_CLEANUP without an internal error. Fixed: the RUNNING
  // assertion passed (message, actual, expected) — JUnit expects
  // (message, expected, actual).
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(),
      new Credentials(), new SystemClock(), appCtx);
  NodeId nid = NodeId.newInstance("127.0.0.2", 0);
  ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  assertEquals("Task attempt is not in running state",
      TaskAttemptState.RUNNING, taImpl.getState());
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_COMMIT_PENDING));
  assertEquals("Task should be in COMMIT_PENDING state",
      TaskAttemptStateInternal.COMMIT_PENDING, taImpl.getInternalState());
  // Kill during commit: attempt transitions to container cleanup.
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL));
  assertFalse("InternalError occurred trying to handle TA_KILL",
      eventHandler.internalError);
  assertEquals("Task should be in KILLED state",
      TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP, taImpl.getInternalState());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testContainerKillAfterAssigned() throws Exception {
  // Killing an attempt right after container assignment must move it to
  // KILL_CONTAINER_CLEANUP. Fixed: the ASSIGNED assertion passed
  // (message, actual, expected) — JUnit expects (message, expected, actual)
  // — and the message typo "assinged" is corrected.
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(),
      new Credentials(), new SystemClock(), appCtx);
  NodeId nid = NodeId.newInstance("127.0.0.2", 0);
  ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
  assertEquals("Task attempt is not in assigned state",
      TaskAttemptStateInternal.ASSIGNED, taImpl.getInternalState());
  // Kill before the container ever launches.
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL));
  assertEquals("Task should be in KILLED state",
      TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP, taImpl.getInternalState());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test
public void testContainerCleanedWhileRunning() throws Exception {
  // Container cleanup arriving while the attempt is RUNNING must not raise
  // an internal error; split host 127.0.0.1 vs. container node 127.0.0.2
  // makes the attempt RACK_LOCAL. Fixed: the RUNNING assertion passed
  // (message, actual, expected) — JUnit expects (message, expected, actual).
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(),
      new Credentials(), new SystemClock(), appCtx);
  // Different host than the split location -> rack locality.
  NodeId nid = NodeId.newInstance("127.0.0.2", 0);
  ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  assertEquals("Task attempt is not in running state",
      TaskAttemptState.RUNNING, taImpl.getState());
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_CLEANED));
  assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
      eventHandler.internalError);
  assertEquals("Task attempt is not assigned on the local rack",
      Locality.RACK_LOCAL, taImpl.getLocality());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test
public void testContainerKillWhileRunning() throws Exception {
  // Killing a RUNNING attempt must move it to KILL_CONTAINER_CLEANUP
  // without an internal error. Fixed: the RUNNING assertion passed
  // (message, actual, expected) — JUnit expects (message, expected, actual).
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(),
      new Credentials(), new SystemClock(), appCtx);
  NodeId nid = NodeId.newInstance("127.0.0.2", 0);
  ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  assertEquals("Task attempt is not in running state",
      TaskAttemptState.RUNNING, taImpl.getState());
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL));
  assertFalse("InternalError occurred trying to handle TA_KILL",
      eventHandler.internalError);
  assertEquals("Task should be in KILLED state",
      TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP, taImpl.getInternalState());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testAttemptContainerRequest() throws Exception {
final Text SECRET_KEY_ALIAS=new Text("secretkeyalias");
final byte[] SECRET_KEY=("secretkey").getBytes();
Map acls=new HashMap(1);
acls.put(ApplicationAccessType.VIEW_APP,"otheruser");
ApplicationId appId=ApplicationId.newInstance(1,1);
JobId jobId=MRBuilderUtils.newJobId(appId,1);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP);
Path jobFile=mock(Path.class);
EventHandler eventHandler=mock(EventHandler.class);
TaskAttemptListener taListener=mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0));
JobConf jobConf=new JobConf();
jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache",true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,"");
jobConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(jobConf);
Credentials credentials=new Credentials();
credentials.addSecretKey(SECRET_KEY_ALIAS,SECRET_KEY);
Token jobToken=new Token(("tokenid").getBytes(),("tokenpw").getBytes(),new Text("tokenkind"),new Text("tokenservice"));
TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,mock(TaskSplitMetaInfo.class),jobConf,taListener,jobToken,credentials,new SystemClock(),null);
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,taImpl.getID().toString());
ContainerLaunchContext launchCtx=TaskAttemptImpl.createContainerLaunchContext(acls,jobConf,jobToken,taImpl.createRemoteTask(),TypeConverter.fromYarn(jobId),mock(WrappedJvmID.class),taListener,credentials);
Assert.assertEquals("ACLs mismatch",acls,launchCtx.getApplicationACLs());
Credentials launchCredentials=new Credentials();
DataInputByteBuffer dibb=new DataInputByteBuffer();
dibb.reset(launchCtx.getTokens());
launchCredentials.readTokenStorageStream(dibb);
for ( Token extends TokenIdentifier> token : credentials.getAllTokens()) {
Token extends TokenIdentifier> launchToken=launchCredentials.getToken(token.getService());
Assert.assertNotNull("Token " + token.getService() + " is missing",launchToken);
Assert.assertEquals("Token " + token.getService() + " mismatch",token,launchToken);
}
Assert.assertNotNull("Secret key missing",launchCredentials.getSecretKey(SECRET_KEY_ALIAS));
Assert.assertTrue("Secret key mismatch",Arrays.equals(SECRET_KEY,launchCredentials.getSecretKey(SECRET_KEY_ALIAS)));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies that the container-launcher thread pool grows on demand but is
// capped by MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT (12 here): 10 events
// grow the pool to 10 threads, 4 more only to the limit of 12.
// NOTE(review): pool growth depends on event-processing timing inside
// CustomContainerLauncher/waitForEvents — statement order is significant.
@Test(timeout=5000) public void testPoolLimits() throws InterruptedException {
ApplicationId appId=ApplicationId.newInstance(12345,67);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,3);
JobId jobId=MRBuilderUtils.newJobId(appId,8);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,9,TaskType.MAP);
TaskAttemptId taskAttemptId=MRBuilderUtils.newTaskAttemptId(taskId,0);
ContainerId containerId=ContainerId.newInstance(appAttemptId,10);
AppContext context=mock(AppContext.class);
CustomContainerLauncher containerLauncher=new CustomContainerLauncher(context);
// Cap the launcher pool at 12 threads.
Configuration conf=new Configuration();
conf.setInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT,12);
containerLauncher.init(conf);
containerLauncher.start();
ThreadPoolExecutor threadPool=containerLauncher.getThreadPool();
containerLauncher.expectedCorePoolSize=ContainerLauncherImpl.INITIAL_POOL_SIZE;
// 10 launch events on 10 distinct hosts -> pool grows to 10.
for (int i=0; i < 10; i++) {
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,containerId,"host" + i + ":1234",null,ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
}
waitForEvents(containerLauncher,10);
Assert.assertEquals(10,threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
// 4 more events on new hosts: the pool may only grow to the limit (12).
containerLauncher.expectedCorePoolSize=12;
for (int i=1; i <= 4; i++) {
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,containerId,"host1" + i + ":1234",null,ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
}
waitForEvents(containerLauncher,12);
Assert.assertEquals(12,threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
// Let the queued events drain; the pool must stay at the cap.
containerLauncher.finishEventHandling=true;
waitForEvents(containerLauncher,14);
Assert.assertEquals(12,threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
containerLauncher.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Simulates a NodeManager whose RPC server never answers in time: with the
// NM command timeout set to 3s and only one map attempt allowed, the launch
// must fail with a SocketTimeoutException and the job must end FAILED with
// that diagnostic attached to the attempt.
// NOTE(review): relies on real wall-clock RPC timeouts — timing sensitive.
@Test(timeout=15000) public void testSlowNM() throws Exception {
conf=new Configuration();
int maxAttempts=1;
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS,maxAttempts);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
// Command timeout shorter than the (deliberately slow) NM response.
conf.setInt("yarn.rpc.nm-command-timeout",3000);
conf.set(YarnConfiguration.IPC_RPC_IMPL,HadoopYarnProtoRPC.class.getName());
YarnRPC rpc=YarnRPC.create(conf);
String bindAddr="localhost:0";
InetSocketAddress addr=NetUtils.createSocketAddr(bindAddr);
// Token-based auth so the AM talks to our dummy NM over secure RPC.
NMTokenSecretManagerInNM tokenSecretManager=new NMTokenSecretManagerInNM();
MasterKey masterKey=Records.newRecord(MasterKey.class);
masterKey.setBytes(ByteBuffer.wrap("key".getBytes()));
tokenSecretManager.setMasterKey(masterKey);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"token");
server=rpc.getServer(ContainerManagementProtocol.class,new DummyContainerManager(),addr,conf,tokenSecretManager,1);
server.start();
MRApp app=new MRAppWithSlowNM(tokenSecretManager);
try {
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Map tasks=job.getTasks();
Assert.assertEquals("Num tasks is not correct",1,tasks.size());
Task task=tasks.values().iterator().next();
app.waitForState(task,TaskState.SCHEDULED);
Map attempts=tasks.values().iterator().next().getAttempts();
Assert.assertEquals("Num attempts is not correct",maxAttempts,attempts.size());
TaskAttempt attempt=attempts.values().iterator().next();
// The attempt gets a container assigned but the launch RPC times out,
// so the single-attempt job fails.
app.waitForInternalState((TaskAttemptImpl)attempt,TaskAttemptStateInternal.ASSIGNED);
app.waitForState(job,JobState.FAILED);
String diagnostics=attempt.getDiagnostics().toString();
LOG.info("attempt.getDiagnostics: " + diagnostics);
Assert.assertTrue(diagnostics.contains("Container launch failed for " + "container_0_0000_01_000000 : "));
Assert.assertTrue(diagnostics.contains("java.net.SocketTimeoutException: 3000 millis timeout while waiting for channel"));
}
finally {
// Always tear down the RPC server and the app, even on assertion failure.
server.stop();
app.stop();
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies thread-pool sizing of the container launcher without a limit:
// the pool starts empty with INITIAL_POOL_SIZE core threads, grows one
// thread per distinct host up to the number of concurrently queued events,
// reuses idle threads for repeated hosts, and adds a thread for a new host.
// NOTE(review): depends on precise event-drain timing via waitForEvents /
// finishEventHandling — statement order is significant.
@Test(timeout=5000) public void testPoolSize() throws InterruptedException {
ApplicationId appId=ApplicationId.newInstance(12345,67);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,3);
JobId jobId=MRBuilderUtils.newJobId(appId,8);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,9,TaskType.MAP);
AppContext context=mock(AppContext.class);
CustomContainerLauncher containerLauncher=new CustomContainerLauncher(context);
containerLauncher.init(new Configuration());
containerLauncher.start();
ThreadPoolExecutor threadPool=containerLauncher.getThreadPool();
// Fresh launcher: no worker threads yet, default core size.
Assert.assertEquals(0,threadPool.getPoolSize());
Assert.assertEquals(ContainerLauncherImpl.INITIAL_POOL_SIZE,threadPool.getCorePoolSize());
Assert.assertNull(containerLauncher.foundErrors);
containerLauncher.expectedCorePoolSize=ContainerLauncherImpl.INITIAL_POOL_SIZE;
// 10 events on 10 distinct hosts -> pool grows to 10 threads.
for (int i=0; i < 10; i++) {
ContainerId containerId=ContainerId.newInstance(appAttemptId,i);
TaskAttemptId taskAttemptId=MRBuilderUtils.newTaskAttemptId(taskId,i);
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,containerId,"host" + i + ":1234",null,ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
}
waitForEvents(containerLauncher,10);
Assert.assertEquals(10,threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
// Let the first batch finish so those 10 threads become idle.
containerLauncher.finishEventHandling=true;
int timeOut=0;
while (containerLauncher.numEventsProcessed.get() < 10 && timeOut++ < 200) {
LOG.info("Waiting for number of events processed to become " + 10 + ". It is now "+ containerLauncher.numEventsProcessed.get()+ ". Timeout is "+ timeOut);
Thread.sleep(1000);
}
Assert.assertEquals(10,containerLauncher.numEventsProcessed.get());
containerLauncher.finishEventHandling=false;
// Same 10 hosts again: idle threads are reused, pool stays at 10.
for (int i=0; i < 10; i++) {
ContainerId containerId=ContainerId.newInstance(appAttemptId,i + 10);
TaskAttemptId taskAttemptId=MRBuilderUtils.newTaskAttemptId(taskId,i + 10);
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,containerId,"host" + i + ":1234",null,ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
}
waitForEvents(containerLauncher,20);
Assert.assertEquals(10,threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
// One event on a brand-new host -> exactly one more thread (11).
containerLauncher.expectedCorePoolSize=11 + ContainerLauncherImpl.INITIAL_POOL_SIZE;
containerLauncher.finishEventHandling=false;
ContainerId containerId=ContainerId.newInstance(appAttemptId,21);
TaskAttemptId taskAttemptId=MRBuilderUtils.newTaskAttemptId(taskId,21);
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,containerId,"host11:1234",null,ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
waitForEvents(containerLauncher,21);
Assert.assertEquals(11,threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
containerLauncher.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test
public void testCompletedTasksRecalculateSchedule() throws Exception {
  // A change in the completed-map count must trigger a recalculation of
  // the reduce schedule on the allocator's next heartbeat, and an
  // unchanged count must not.
  LOG.info("Running testCompletedTasksRecalculateSchedule");
  Configuration configuration = new Configuration();
  final MyResourceManager resourceManager = new MyResourceManager(configuration);
  resourceManager.start();
  DrainDispatcher rmEventDispatcher =
      (DrainDispatcher) resourceManager.getRMContext().getDispatcher();

  // Submit an application and bring up an NM so the AM can be launched.
  RMApp submittedApp = resourceManager.submitApp(1024);
  rmEventDispatcher.await();
  MockNM amNode = resourceManager.registerNode("amNM:1234", 2048);
  amNode.nodeHeartbeat(true);
  rmEventDispatcher.await();
  ApplicationAttemptId amAttemptId =
      submittedApp.getCurrentAppAttempt().getAppAttemptId();
  resourceManager.sendAMLaunched(amAttemptId);
  rmEventDispatcher.await();

  // Mock a running job: 10 maps, 10 reduces, none completed yet.
  JobId mockJobId = MRBuilderUtils.newJobId(amAttemptId.getApplicationId(), 0);
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(mockJobId,
      "job", "user", JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "jobfile", null,
      false, ""));
  doReturn(10).when(mockJob).getTotalMaps();
  doReturn(10).when(mockJob).getTotalReduces();
  doReturn(0).when(mockJob).getCompletedMaps();

  RecalculateContainerAllocator allocator = new RecalculateContainerAllocator(
      resourceManager, configuration, amAttemptId, mockJob);
  allocator.schedule();

  // Second heartbeat with the same completed count: no recalculation.
  allocator.recalculatedReduceSchedule = false;
  allocator.schedule();
  Assert.assertFalse("Unexpected recalculate of reduce schedule",
      allocator.recalculatedReduceSchedule);

  // A newly completed map must force a recalculation.
  doReturn(1).when(mockJob).getCompletedMaps();
  allocator.schedule();
  Assert.assertTrue("Expected recalculate of reduce schedule",
      allocator.recalculatedReduceSchedule);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Checks that job progress reported to the RM tracks task completion: the
// expected values (0.05, 0.095, 0.41, 0.59, 0.95) follow from a 10-map /
// 10-reduce job and the AM's map/reduce progress weighting.
// NOTE(review): the exact figures depend on the precise order of
// schedule()/heartbeat/await calls — do not reorder statements.
@Test public void testReportedAppProgress() throws Exception {
LOG.info("Running testReportedAppProgress");
Configuration conf=new Configuration();
final MyResourceManager rm=new MyResourceManager(conf);
rm.start();
DrainDispatcher rmDispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher();
// Submit the app and bring up an NM large enough for all containers.
RMApp rmApp=rm.submitApp(1024);
rmDispatcher.await();
MockNM amNodeManager=rm.registerNode("amNM:1234",21504);
amNodeManager.nodeHeartbeat(true);
rmDispatcher.await();
final ApplicationAttemptId appAttemptId=rmApp.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
rmDispatcher.await();
// MRApp wired to this RM via a custom allocator and a drainable dispatcher.
MRApp mrApp=new MRApp(appAttemptId,ContainerId.newInstance(appAttemptId,0),10,10,false,this.getClass().getName(),true,1){
@Override protected Dispatcher createDispatcher(){
return new DrainDispatcher();
}
protected ContainerAllocator createContainerAllocator( ClientService clientService, AppContext context){
return new MyContainerAllocator(rm,appAttemptId,context);
}
}
;
Assert.assertEquals(0.0,rmApp.getProgress(),0.0);
mrApp.submit(conf);
Job job=mrApp.getContext().getAllJobs().entrySet().iterator().next().getValue();
DrainDispatcher amDispatcher=(DrainDispatcher)mrApp.getDispatcher();
MyContainerAllocator allocator=(MyContainerAllocator)mrApp.getContainerAllocator();
mrApp.waitForInternalState((JobImpl)job,JobStateInternal.RUNNING);
amDispatcher.await();
// All map attempts must be waiting for containers before scheduling.
for ( Task t : job.getTasks().values()) {
if (t.getType() == TaskType.MAP) {
mrApp.waitForInternalState((TaskAttemptImpl)t.getAttempts().values().iterator().next(),TaskAttemptStateInternal.UNASSIGNED);
}
}
amDispatcher.await();
allocator.schedule();
rmDispatcher.await();
amNodeManager.nodeHeartbeat(true);
rmDispatcher.await();
allocator.schedule();
rmDispatcher.await();
// Wait for every map to be running before checking progress.
for ( Task t : job.getTasks().values()) {
if (t.getType() == TaskType.MAP) {
mrApp.waitForState(t,TaskState.RUNNING);
}
}
allocator.schedule();
rmDispatcher.await();
Assert.assertEquals(0.05f,job.getProgress(),0.001f);
Assert.assertEquals(0.05f,rmApp.getProgress(),0.001f);
// Finish maps one batch at a time and check the reported progress climbs.
Iterator it=job.getTasks().values().iterator();
finishNextNTasks(rmDispatcher,amNodeManager,mrApp,it,1);
allocator.schedule();
rmDispatcher.await();
Assert.assertEquals(0.095f,job.getProgress(),0.001f);
Assert.assertEquals(0.095f,rmApp.getProgress(),0.001f);
finishNextNTasks(rmDispatcher,amNodeManager,mrApp,it,7);
allocator.schedule();
rmDispatcher.await();
Assert.assertEquals(0.41f,job.getProgress(),0.001f);
Assert.assertEquals(0.41f,rmApp.getProgress(),0.001f);
finishNextNTasks(rmDispatcher,amNodeManager,mrApp,it,2);
allocator.schedule();
rmDispatcher.await();
// Maps done; heartbeat so reduce containers get allocated and started.
amNodeManager.nodeHeartbeat(true);
rmDispatcher.await();
allocator.schedule();
rmDispatcher.await();
for ( Task t : job.getTasks().values()) {
if (t.getType() == TaskType.REDUCE) {
mrApp.waitForState(t,TaskState.RUNNING);
}
}
// Finish reduces and verify progress through to 0.95.
finishNextNTasks(rmDispatcher,amNodeManager,mrApp,it,2);
allocator.schedule();
rmDispatcher.await();
Assert.assertEquals(0.59f,job.getProgress(),0.001f);
Assert.assertEquals(0.59f,rmApp.getProgress(),0.001f);
finishNextNTasks(rmDispatcher,amNodeManager,mrApp,it,8);
allocator.schedule();
rmDispatcher.await();
Assert.assertEquals(0.95f,job.getProgress(),0.001f);
Assert.assertEquals(0.95f,rmApp.getProgress(),0.001f);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testResource() throws Exception {
  // Two requests with different memory demands should each be matched to
  // a suitable container once the nodes heartbeat in.
  LOG.info("Running testResource");
  Configuration configuration = new Configuration();
  MyResourceManager resourceManager = new MyResourceManager(configuration);
  resourceManager.start();
  DrainDispatcher rmEventDispatcher =
      (DrainDispatcher) resourceManager.getRMContext().getDispatcher();

  // Submit an application and launch its AM attempt.
  RMApp submittedApp = resourceManager.submitApp(1024);
  rmEventDispatcher.await();
  MockNM amNode = resourceManager.registerNode("amNM:1234", 2048);
  amNode.nodeHeartbeat(true);
  rmEventDispatcher.await();
  ApplicationAttemptId amAttemptId =
      submittedApp.getCurrentAppAttempt().getAppAttemptId();
  resourceManager.sendAMLaunched(amAttemptId);
  rmEventDispatcher.await();

  // A mocked running job backs the allocator.
  JobId mockJobId = MRBuilderUtils.newJobId(amAttemptId.getApplicationId(), 0);
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(mockJobId,
      "job", "user", JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "jobfile", null,
      false, ""));
  MyContainerAllocator allocator = new MyContainerAllocator(resourceManager,
      configuration, amAttemptId, mockJob);

  // Three worker nodes with ample capacity.
  MockNM workerNode1 = resourceManager.registerNode("h1:1234", 10240);
  MockNM workerNode2 = resourceManager.registerNode("h2:1234", 10240);
  MockNM workerNode3 = resourceManager.registerNode("h3:1234", 10240);
  rmEventDispatcher.await();

  // One 1024 MB request local to h1 and one 2048 MB request local to h2.
  ContainerRequestEvent request1 = createReq(mockJobId, 1, 1024, new String[] { "h1" });
  allocator.sendRequest(request1);
  ContainerRequestEvent request2 = createReq(mockJobId, 2, 2048, new String[] { "h2" });
  allocator.sendRequest(request2);

  // First heartbeat only registers the asks; nothing is assigned yet.
  List assigned = allocator.schedule();
  rmEventDispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());

  // After node heartbeats, the next round hands out both containers.
  workerNode1.nodeHeartbeat(true);
  workerNode2.nodeHeartbeat(true);
  workerNode3.nodeHeartbeat(true);
  rmEventDispatcher.await();
  assigned = allocator.schedule();
  rmEventDispatcher.await();
  checkAssignments(new ContainerRequestEvent[] { request1, request2 }, assigned, false);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies AM-side node blacklisting: after one task failure each on h1 and h2
 * (MAX_TASK_FAILURES_PER_TRACKER=1) both nodes are blacklisted, no containers
 * are taken from them, and all three requests are eventually satisfied on h3.
 */
@Test public void testBlackListedNodes() throws Exception {
  LOG.info("Running testBlackListedNodes");
  Configuration conf = new Configuration();
  conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, true);
  conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
  // -1 disables the "ignore blacklisting" threshold, so blacklisting is never
  // turned off no matter how many nodes get blacklisted.
  conf.setInt(MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, -1);
  MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
  // Submit the application and bring up the AM.
  RMApp app = rm.submitApp(1024);
  dispatcher.await();
  MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
  amNodeManager.nodeHeartbeat(true);
  dispatcher.await();
  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();
  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  // Mocked job always reports RUNNING so the allocator keeps heartbeating.
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId, "job",
      "user", JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
  MyContainerAllocator allocator =
      new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
  MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
  MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
  MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
  dispatcher.await();
  // One map request per host.
  ContainerRequestEvent event1 = createReq(jobId, 1, 1024, new String[]{"h1"});
  allocator.sendRequest(event1);
  ContainerRequestEvent event2 = createReq(jobId, 2, 1024, new String[]{"h2"});
  allocator.sendRequest(event2);
  ContainerRequestEvent event3 = createReq(jobId, 3, 1024, new String[]{"h3"});
  allocator.sendRequest(event3);
  // First allocate call only forwards the asks; nothing assigned yet.
  List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  // Fail one attempt on h1 and one on h2 — with the failure threshold at 1
  // this must blacklist both nodes.
  ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h1", false);
  allocator.sendFailure(f1);
  ContainerFailedEvent f2 = createFailEvent(jobId, 1, "h2", false);
  allocator.sendFailure(f2);
  nodeManager1.nodeHeartbeat(true);
  nodeManager2.nodeHeartbeat(true);
  dispatcher.await();
  assigned = allocator.schedule();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  // The allocate call must have reported both blacklist additions to the RM.
  assertBlacklistAdditionsAndRemovals(2, 0, rm);
  nodeManager1.nodeHeartbeat(false);
  nodeManager2.nodeHeartbeat(false);
  dispatcher.await();
  assigned = allocator.schedule();
  dispatcher.await();
  assertBlacklistAdditionsAndRemovals(0, 0, rm);
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  // Only the non-blacklisted node h3 may serve the containers.
  nodeManager3.nodeHeartbeat(true);
  dispatcher.await();
  assigned = allocator.schedule();
  dispatcher.await();
  assertBlacklistAdditionsAndRemovals(0, 0, rm);
  // assertEquals (not assertTrue on size()==3) so a failure reports the
  // actual assignment count.
  Assert.assertEquals("No of assignments must be 3", 3, assigned.size());
  for (TaskAttemptContainerAssignedEvent assig : assigned) {
    Assert.assertEquals("Assigned container host not correct", "h3",
        assig.getContainer().getNodeId().getHost());
  }
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies map-task node locality: requests for h1 land on h1, and the
 * request for h2 (whose node never heartbeats) is satisfied off-switch on h3,
 * which does heartbeat capacity.
 */
@Test public void testMapNodeLocality() throws Exception {
  LOG.info("Running testMapNodeLocality");
  Configuration conf = new Configuration();
  MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
  // Submit the application and bring up the AM.
  RMApp app = rm.submitApp(1024);
  dispatcher.await();
  MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
  amNodeManager.nodeHeartbeat(true);
  dispatcher.await();
  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();
  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  // Mocked job always reports RUNNING so the allocator keeps heartbeating.
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId, "job",
      "user", JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
  MyContainerAllocator allocator =
      new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
  // h1 can hold two 1GB maps; h2 never heartbeats; h3 can hold one.
  MockNM nodeManager1 = rm.registerNode("h1:1234", 3072);
  rm.registerNode("h2:1234", 10240);
  MockNM nodeManager3 = rm.registerNode("h3:1234", 1536);
  dispatcher.await();
  ContainerRequestEvent event1 = createReq(jobId, 1, 1024, new String[]{"h1"});
  allocator.sendRequest(event1);
  ContainerRequestEvent event2 = createReq(jobId, 2, 1024, new String[]{"h1"});
  allocator.sendRequest(event2);
  ContainerRequestEvent event3 = createReq(jobId, 3, 1024, new String[]{"h2"});
  allocator.sendRequest(event3);
  // First allocate call only forwards the asks; nothing assigned yet.
  List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  // Only h3 and h1 heartbeat, so the h2 request must be placed elsewhere.
  nodeManager3.nodeHeartbeat(true);
  nodeManager1.nodeHeartbeat(true);
  dispatcher.await();
  assigned = allocator.schedule();
  dispatcher.await();
  checkAssignments(new ContainerRequestEvent[]{event1, event2, event3},
      assigned, false);
  // Pull out event3's assignment: it must have gone to h3 since h2 never
  // heartbeated. Use Iterator.remove so the list is not mutated while being
  // iterated by the for-each (the original remove-in-for-each only avoided a
  // ConcurrentModificationException because of the immediate break).
  Iterator<TaskAttemptContainerAssignedEvent> eventIter = assigned.iterator();
  while (eventIter.hasNext()) {
    TaskAttemptContainerAssignedEvent event = eventIter.next();
    if (event.getTaskAttemptID().equals(event3.getAttemptID())) {
      eventIter.remove();
      Assert.assertEquals("h3", event.getContainer().getNodeId().getHost());
      break;
    }
  }
  // The two remaining assignments must be node-local on h1.
  checkAssignments(new ContainerRequestEvent[]{event1, event2}, assigned, true);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies the "ignore blacklisting" threshold: with the blacklisted-node
 * percent set to 33, blacklisting is honored while few nodes are blacklisted,
 * ignored once blacklisted nodes exceed 33% of the cluster, honored again as
 * more nodes register, and so on as the ratio crosses the threshold in both
 * directions. The getContainerOnHost() arguments after the allocator carry the
 * expected blacklist additions/removals and ignore-blacklisting transitions
 * for each allocate round, so the exact call order below is significant.
 */
@Test public void testIgnoreBlacklisting() throws Exception {
LOG.info("Running testIgnoreBlacklisting");
Configuration conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE,true);
conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER,1);
// Ignore blacklisting once more than 33% of the nodes are blacklisted.
conf.setInt(MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT,33);
MyResourceManager rm=new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher();
RMApp app=rm.submitApp(1024);
dispatcher.await();
// Nodes are registered incrementally as the test drives the blacklisted
// ratio across the 33% threshold.
MockNM[] nodeManagers=new MockNM[10];
int nmNum=0;
List assigned=null;
nodeManagers[nmNum]=registerNodeManager(nmNum++,rm,dispatcher);
nodeManagers[0].nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0);
Job mockJob=mock(Job.class);
when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
MyContainerAllocator allocator=new MyContainerAllocator(rm,conf,appAttemptId,mockJob);
// 1 node, none blacklisted: normal assignment on h1.
assigned=getContainerOnHost(jobId,1,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,0,0,0,0,rm);
Assert.assertEquals("No of assignments must be 1",1,assigned.size());
LOG.info("Failing container _1 on H1 (Node should be blacklisted and" + " ignore blacklisting enabled");
// h1 becomes 1 blacklisted of 1 node (100% > 33%): blacklisting is ignored.
ContainerFailedEvent f1=createFailEvent(jobId,1,"h1",false);
allocator.sendFailure(f1);
assigned=getContainerOnHost(jobId,2,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,1,0,0,1,rm);
Assert.assertEquals("No of assignments must be 0",0,assigned.size());
// With blacklisting ignored, h1 can still serve containers.
assigned=getContainerOnHost(jobId,2,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,0,0,0,0,rm);
Assert.assertEquals("No of assignments must be 1",1,assigned.size());
nodeManagers[nmNum]=registerNodeManager(nmNum++,rm,dispatcher);
assigned=getContainerOnHost(jobId,3,1024,new String[]{"h2"},nodeManagers[1],dispatcher,allocator,0,0,0,0,rm);
Assert.assertEquals("No of assignments must be 1",1,assigned.size());
nodeManagers[nmNum]=registerNodeManager(nmNum++,rm,dispatcher);
assigned=getContainerOnHost(jobId,4,1024,new String[]{"h3"},nodeManagers[2],dispatcher,allocator,0,0,0,0,rm);
Assert.assertEquals("No of assignments must be 1",1,assigned.size());
assigned=getContainerOnHost(jobId,5,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,0,0,0,0,rm);
Assert.assertEquals("No of assignments must be 1",1,assigned.size());
// 4th node registers: 1 blacklisted of 4 (25% < 33%), so blacklisting is
// re-activated and h1 stops getting containers.
nodeManagers[nmNum]=registerNodeManager(nmNum++,rm,dispatcher);
assigned=getContainerOnHost(jobId,6,1024,new String[]{"h4"},nodeManagers[3],dispatcher,allocator,0,0,1,0,rm);
Assert.assertEquals("No of assignments must be 1",1,assigned.size());
assigned=getContainerOnHost(jobId,7,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,0,0,0,0,rm);
Assert.assertEquals("No of assignments must be 0",0,assigned.size());
// Failing on h2 blacklists it too: 2 of 4 (50% > 33%), ignore again.
ContainerFailedEvent f2=createFailEvent(jobId,3,"h2",false);
allocator.sendFailure(f2);
assigned=getContainerOnHost(jobId,8,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,1,0,0,2,rm);
Assert.assertEquals("No of assignments must be 0",0,assigned.size());
// The previously withheld request and the new one are now both served on h1.
assigned=getContainerOnHost(jobId,8,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,0,0,0,0,rm);
Assert.assertEquals("No of assignments must be 2",2,assigned.size());
assigned=getContainerOnHost(jobId,9,1024,new String[]{"h2"},nodeManagers[1],dispatcher,allocator,0,0,0,0,rm);
Assert.assertEquals("No of assignments must be 1",1,assigned.size());
// h3 fails while ignore-blacklisting is active; it still serves containers.
ContainerFailedEvent f3=createFailEvent(jobId,4,"h3",false);
allocator.sendFailure(f3);
nodeManagers[nmNum]=registerNodeManager(nmNum++,rm,dispatcher);
assigned=getContainerOnHost(jobId,10,1024,new String[]{"h3"},nodeManagers[2],dispatcher,allocator,0,0,0,0,rm);
Assert.assertEquals("No of assignments must be 1",1,assigned.size());
// Registering 5 more nodes brings the cluster to 10: 3 blacklisted of 10
// (30% < 33%) flips blacklisting back on at the last iteration (i == 4).
for (int i=0; i < 5; i++) {
nodeManagers[nmNum]=registerNodeManager(nmNum++,rm,dispatcher);
assigned=getContainerOnHost(jobId,11 + i,1024,new String[]{String.valueOf(5 + i)},nodeManagers[4 + i],dispatcher,allocator,0,0,(i == 4 ? 3 : 0),0,rm);
Assert.assertEquals("No of assignments must be 1",1,assigned.size());
}
// Blacklisting is active again, so h3 gets nothing.
assigned=getContainerOnHost(jobId,20,1024,new String[]{"h3"},nodeManagers[2],dispatcher,allocator,0,0,0,0,rm);
Assert.assertEquals("No of assignments must be 0",0,assigned.size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies that when a map request arrives while a reducer holds the only
 * resources, the allocator ramps the reducer down and the resulting kill
 * event carries the RAMPDOWN_DIAGNOSTIC message.
 */
@Test(timeout=30000) public void testReducerRampdownDiagnostics() throws Exception {
  // Fixed: previous message had a typo ("tesReducer...").
  LOG.info("Running testReducerRampdownDiagnostics");
  final Configuration conf = new Configuration();
  // Allow reducers to start before any maps have completed.
  conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 0.0f);
  final MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  final DrainDispatcher dispatcher =
      (DrainDispatcher) rm.getRMContext().getDispatcher();
  final RMApp app = rm.submitApp(1024);
  dispatcher.await();
  // A single 2GB node: the reducer and the map cannot both fit.
  final String host = "host1";
  final MockNM nm = rm.registerNode(String.format("%s:1234", host), 2048);
  nm.nodeHeartbeat(true);
  dispatcher.await();
  final ApplicationAttemptId appAttemptId =
      app.getCurrentAppAttempt().getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();
  final JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  // Mocked job always reports RUNNING so the allocator keeps heartbeating.
  final Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId, "job",
      "user", JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
  final MyContainerAllocator allocator =
      new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
  dispatcher.await();
  final String[] locations = new String[]{host};
  // Request a reducer and pump heartbeats until it is assigned.
  allocator.sendRequest(createReq(jobId, 0, 1024, locations, false, true));
  for (int i = 0; i < 1; ) {
    dispatcher.await();
    i += allocator.schedule().size();
    nm.nodeHeartbeat(true);
  }
  // Now request a map; the allocator must preempt (ramp down) the reducer.
  allocator.sendRequest(createReq(jobId, 0, 1024, locations, true, false));
  while (allocator.getTaskAttemptKillEvents().size() == 0) {
    dispatcher.await();
    allocator.schedule();  // drive the allocator; result intentionally unused
    nm.nodeHeartbeat(true);
  }
  final String killEventMessage =
      allocator.getTaskAttemptKillEvents().get(0).getMessage();
  Assert.assertTrue("No reducer rampDown preemption message",
      killEventMessage.contains(RMContainerAllocator.RAMPDOWN_DIAGNOSTIC));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies node-update reporting: the allocator forwards updated-node events
 * to the job, and when the node running a task attempt turns unusable it
 * issues a kill event for that attempt. Update lists must also be consumed —
 * a later allocate call reports nothing new.
 */
@Test public void testUpdatedNodes() throws Exception {
Configuration conf=new Configuration();
MyResourceManager rm=new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher();
// Submit the application and bring up the AM.
RMApp app=rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager=rm.registerNode("amNM:1234",2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0);
Job mockJob=mock(Job.class);
MyContainerAllocator allocator=new MyContainerAllocator(rm,conf,appAttemptId,mockJob);
MockNM nm1=rm.registerNode("h1:1234",10240);
MockNM nm2=rm.registerNode("h2:1234",10240);
dispatcher.await();
ContainerRequestEvent event=createReq(jobId,1,1024,new String[]{"h1"});
allocator.sendRequest(event);
// Wire the mock job so the attempt resolves to a task running on nm1; this
// lets the allocator map nm1's state change back to the attempt to kill.
TaskAttemptId attemptId=event.getAttemptID();
TaskAttempt mockTaskAttempt=mock(TaskAttempt.class);
when(mockTaskAttempt.getNodeId()).thenReturn(nm1.getNodeId());
Task mockTask=mock(Task.class);
when(mockTask.getAttempt(attemptId)).thenReturn(mockTaskAttempt);
when(mockJob.getTask(attemptId.getTaskId())).thenReturn(mockTask);
List assigned=allocator.schedule();
dispatcher.await();
nm1.nodeHeartbeat(true);
dispatcher.await();
// First allocate after startup reports all 3 nodes (amNM, h1, h2) as updated.
Assert.assertEquals(1,allocator.getJobUpdatedNodeEvents().size());
Assert.assertEquals(3,allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size());
allocator.getJobUpdatedNodeEvents().clear();
assigned=allocator.schedule();
dispatcher.await();
// The request is satisfied on h1, and no further updates or kills pending.
Assert.assertEquals(1,assigned.size());
Assert.assertEquals(nm1.getNodeId(),assigned.get(0).getContainer().getNodeId());
Assert.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty());
Assert.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty());
// Both worker nodes report unhealthy.
nm1.nodeHeartbeat(false);
nm2.nodeHeartbeat(false);
dispatcher.await();
assigned=allocator.schedule();
dispatcher.await();
Assert.assertEquals(0,assigned.size());
// One updated-node event covering the 2 unusable nodes, plus a kill for the
// attempt that was running on nm1.
Assert.assertEquals(1,allocator.getJobUpdatedNodeEvents().size());
Assert.assertEquals(1,allocator.getTaskAttemptKillEvents().size());
Assert.assertEquals(2,allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size());
Assert.assertEquals(attemptId,allocator.getTaskAttemptKillEvents().get(0).getTaskAttemptID());
allocator.getJobUpdatedNodeEvents().clear();
allocator.getTaskAttemptKillEvents().clear();
// A subsequent allocate call must not re-report the same updates.
assigned=allocator.schedule();
dispatcher.await();
Assert.assertEquals(0,assigned.size());
Assert.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty());
Assert.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies that stopping the MRApp unregisters the AM from the RM, and that
 * unregistration only happens after a successful registration.
 */
@Test public void testUnregistrationOnlyIfRegistered() throws Exception {
Configuration conf=new Configuration();
final MyResourceManager rm=new MyResourceManager(conf);
rm.start();
DrainDispatcher rmDispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher();
// Submit the application and bring up the AM node.
RMApp rmApp=rm.submitApp(1024);
rmDispatcher.await();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",11264);
amNodeManager.nodeHeartbeat(true);
rmDispatcher.await();
final ApplicationAttemptId appAttemptId=rmApp.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
rmDispatcher.await();
// MRApp with 10 maps/0 reduces wired to this RM via MyContainerAllocator and
// a DrainDispatcher so events can be flushed deterministically.
MRApp mrApp=new MRApp(appAttemptId,ContainerId.newInstance(appAttemptId,0),10,0,false,this.getClass().getName(),true,1){
@Override protected Dispatcher createDispatcher(){
return new DrainDispatcher();
}
protected ContainerAllocator createContainerAllocator( ClientService clientService, AppContext context){
return new MyContainerAllocator(rm,appAttemptId,context);
}
}
;
mrApp.submit(conf);
DrainDispatcher amDispatcher=(DrainDispatcher)mrApp.getDispatcher();
MyContainerAllocator allocator=(MyContainerAllocator)mrApp.getContainerAllocator();
amDispatcher.await();
// Registration must have completed before we stop, so the stop path is
// exercising the registered -> unregistered transition.
Assert.assertTrue(allocator.isApplicationMasterRegistered());
mrApp.stop();
Assert.assertTrue(allocator.isUnregistered());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that containers already scheduled on a node that subsequently gets
 * blacklisted are not handed to tasks: after h1 is blacklisted, both the
 * rescheduled attempt and a new request naming h1 end up on h3 instead.
 */
@Test public void testBlackListedNodesWithSchedulingToThatNode() throws Exception {
  LOG.info("Running testBlackListedNodesWithSchedulingToThatNode");
  Configuration conf = new Configuration();
  conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, true);
  conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
  // -1 disables the "ignore blacklisting" threshold.
  conf.setInt(MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, -1);
  MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
  // Submit the application and bring up the AM.
  RMApp app = rm.submitApp(1024);
  dispatcher.await();
  MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
  amNodeManager.nodeHeartbeat(true);
  dispatcher.await();
  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();
  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  // Mocked job always reports RUNNING so the allocator keeps heartbeating.
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId, "job",
      "user", JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
  MyContainerAllocator allocator =
      new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
  MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
  MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
  dispatcher.await();
  LOG.info("Requesting 1 Containers _1 on H1");
  ContainerRequestEvent event1 = createReq(jobId, 1, 1024, new String[]{"h1"});
  allocator.sendRequest(event1);
  LOG.info("RM Heartbeat (to send the container requests)");
  List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  LOG.info("h1 Heartbeat (To actually schedule the containers)");
  nodeManager1.nodeHeartbeat(true);
  dispatcher.await();
  LOG.info("RM Heartbeat (To process the scheduled containers)");
  assigned = allocator.schedule();
  dispatcher.await();
  assertBlacklistAdditionsAndRemovals(0, 0, rm);
  Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
  // Failing the attempt hits MAX_TASK_FAILURES_PER_TRACKER=1 and blacklists h1.
  LOG.info("Failing container _1 on H1 (should blacklist the node)");
  ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h1", false);
  allocator.sendFailure(f1);
  // Reschedule the failed attempt, still preferring h1.
  ContainerRequestEvent event1f = createReq(jobId, 1, 1024, new String[]{"h1"}, true, false);
  allocator.sendRequest(event1f);
  assigned = allocator.schedule();
  dispatcher.await();
  assertBlacklistAdditionsAndRemovals(1, 0, rm);
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  // A fresh request naming both h1 and h3.
  ContainerRequestEvent event3 = createReq(jobId, 3, 1024, new String[]{"h1", "h3"});
  allocator.sendRequest(event3);
  LOG.info("h1 Heartbeat (To actually schedule the containers)");
  nodeManager1.nodeHeartbeat(true);
  dispatcher.await();
  LOG.info("RM Heartbeat (To process the scheduled containers)");
  assigned = allocator.schedule();
  dispatcher.await();
  assertBlacklistAdditionsAndRemovals(0, 0, rm);
  // Containers scheduled on the blacklisted h1 must not be assigned to tasks.
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  LOG.info("RM Heartbeat (To process the re-scheduled containers)");
  assigned = allocator.schedule();
  dispatcher.await();
  assertBlacklistAdditionsAndRemovals(0, 0, rm);
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  LOG.info("h3 Heartbeat (To re-schedule the containers)");
  nodeManager3.nodeHeartbeat(true);
  dispatcher.await();
  LOG.info("RM Heartbeat (To process the re-scheduled containers for H3)");
  assigned = allocator.schedule();
  assertBlacklistAdditionsAndRemovals(0, 0, rm);
  dispatcher.await();
  for (TaskAttemptContainerAssignedEvent assig : assigned) {
    // Fixed log typo: "assgined" -> "assigned".
    LOG.info(assig.getTaskAttemptID() + " assigned to " + assig.getContainer().getId()
        + " with priority " + assig.getContainer().getPriority());
  }
  // Both the rescheduled attempt and the new request land on h3.
  Assert.assertEquals("No of assignments must be 2", 2, assigned.size());
  for (TaskAttemptContainerAssignedEvent assig : assigned) {
    Assert.assertEquals("Assigned container " + assig.getContainer().getId()
        + " host not correct", "h3", assig.getContainer().getNodeId().getHost());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Basic allocate flow: verifies both the assignments and the exact number of
 * ResourceRequests ("asks") sent to the RM scheduler on each allocate call —
 * new requests inflate the ask list, satisfied ones drain it.
 */
@Test public void testSimple() throws Exception {
LOG.info("Running testSimple");
Configuration conf=new Configuration();
MyResourceManager rm=new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher();
// Submit the application and bring up the AM.
RMApp app=rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager=rm.registerNode("amNM:1234",2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0);
// Mocked job always reports RUNNING so the allocator keeps heartbeating.
Job mockJob=mock(Job.class);
when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
MyContainerAllocator allocator=new MyContainerAllocator(rm,conf,appAttemptId,mockJob);
MockNM nodeManager1=rm.registerNode("h1:1234",10240);
MockNM nodeManager2=rm.registerNode("h2:1234",10240);
MockNM nodeManager3=rm.registerNode("h3:1234",10240);
dispatcher.await();
ContainerRequestEvent event1=createReq(jobId,1,1024,new String[]{"h1"});
allocator.sendRequest(event1);
ContainerRequestEvent event2=createReq(jobId,2,1024,new String[]{"h2"});
allocator.sendRequest(event2);
List assigned=allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0",0,assigned.size());
// 2 host-level asks + rack + ANY = 4 ResourceRequests for the first round.
Assert.assertEquals(4,rm.getMyFifoScheduler().lastAsk.size());
ContainerRequestEvent event3=createReq(jobId,3,1024,new String[]{"h3"});
allocator.sendRequest(event3);
assigned=allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0",0,assigned.size());
// Only the deltas introduced by event3 (host + rack + ANY) are asked now.
Assert.assertEquals(3,rm.getMyFifoScheduler().lastAsk.size());
// Heartbeats let the scheduler grant the containers.
nodeManager1.nodeHeartbeat(true);
nodeManager2.nodeHeartbeat(true);
nodeManager3.nodeHeartbeat(true);
dispatcher.await();
assigned=allocator.schedule();
dispatcher.await();
// Nothing new to ask while the grants are being picked up.
Assert.assertEquals(0,rm.getMyFifoScheduler().lastAsk.size());
checkAssignments(new ContainerRequestEvent[]{event1,event2,event3},assigned,false);
// After assignment, the satisfied requests are decremented, producing
// updated (zeroed) asks for the 5 affected ResourceRequests.
assigned=allocator.schedule();
dispatcher.await();
Assert.assertEquals(5,rm.getMyFifoScheduler().lastAsk.size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Completed-container statuses with a normal exit code map to
 * TA_CONTAINER_COMPLETED, while ABORTED and PREEMPTED exit statuses both map
 * to TA_KILL.
 */
@Test public void testCompletedContainerEvent(){
  RMContainerAllocator containerAllocator = new RMContainerAllocator(
      mock(ClientService.class), mock(AppContext.class),
      new NoopAMPreemptionPolicy());
  TaskAttemptId mapAttemptId = MRBuilderUtils.newTaskAttemptId(
      MRBuilderUtils.newTaskId(MRBuilderUtils.newJobId(1, 1, 1), 1, TaskType.MAP), 1);
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);

  // Container 1: a clean exit versus an ABORTED exit.
  ContainerId firstContainer = ContainerId.newInstance(appAttemptId, 1);
  ContainerStatus cleanStatus =
      ContainerStatus.newInstance(firstContainer, ContainerState.RUNNING, "", 0);
  TaskAttemptEvent cleanEvent =
      containerAllocator.createContainerFinishedEvent(cleanStatus, mapAttemptId);
  Assert.assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED,
      cleanEvent.getType());
  ContainerStatus abortStatus = ContainerStatus.newInstance(
      firstContainer, ContainerState.RUNNING, "", ContainerExitStatus.ABORTED);
  TaskAttemptEvent abortEvent =
      containerAllocator.createContainerFinishedEvent(abortStatus, mapAttemptId);
  Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortEvent.getType());

  // Container 2: a clean exit versus a PREEMPTED exit — preemption is also
  // surfaced to the task as a kill.
  ContainerId secondContainer = ContainerId.newInstance(appAttemptId, 2);
  ContainerStatus cleanStatus2 =
      ContainerStatus.newInstance(secondContainer, ContainerState.RUNNING, "", 0);
  TaskAttemptEvent cleanEvent2 =
      containerAllocator.createContainerFinishedEvent(cleanStatus2, mapAttemptId);
  Assert.assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED,
      cleanEvent2.getType());
  ContainerStatus preemptStatus = ContainerStatus.newInstance(
      secondContainer, ContainerState.RUNNING, "", ContainerExitStatus.PREEMPTED);
  TaskAttemptEvent preemptEvent =
      containerAllocator.createContainerFinishedEvent(preemptStatus, mapAttemptId);
  Assert.assertEquals(TaskAttemptEventType.TA_KILL, preemptEvent.getType());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies RM-reported application progress for a map-only job (10 maps, 0
 * reduces): progress starts at the 0.05 AM base once tasks run, then climbs
 * in steps (0.14, 0.59, 0.95) as 1, 5, and 4 more maps finish. The expected
 * values are coupled to the map counts passed to finishNextNTasks().
 */
@Test public void testReportedAppProgressWithOnlyMaps() throws Exception {
LOG.info("Running testReportedAppProgressWithOnlyMaps");
Configuration conf=new Configuration();
final MyResourceManager rm=new MyResourceManager(conf);
rm.start();
DrainDispatcher rmDispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher();
// Submit the application and bring up the AM node.
RMApp rmApp=rm.submitApp(1024);
rmDispatcher.await();
MockNM amNodeManager=rm.registerNode("amNM:1234",11264);
amNodeManager.nodeHeartbeat(true);
rmDispatcher.await();
final ApplicationAttemptId appAttemptId=rmApp.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
rmDispatcher.await();
// MRApp with 10 maps / 0 reduces wired to this RM via MyContainerAllocator.
MRApp mrApp=new MRApp(appAttemptId,ContainerId.newInstance(appAttemptId,0),10,0,false,this.getClass().getName(),true,1){
@Override protected Dispatcher createDispatcher(){
return new DrainDispatcher();
}
protected ContainerAllocator createContainerAllocator( ClientService clientService, AppContext context){
return new MyContainerAllocator(rm,appAttemptId,context);
}
}
;
Assert.assertEquals(0.0,rmApp.getProgress(),0.0);
mrApp.submit(conf);
Job job=mrApp.getContext().getAllJobs().entrySet().iterator().next().getValue();
DrainDispatcher amDispatcher=(DrainDispatcher)mrApp.getDispatcher();
MyContainerAllocator allocator=(MyContainerAllocator)mrApp.getContainerAllocator();
mrApp.waitForInternalState((JobImpl)job,JobStateInternal.RUNNING);
amDispatcher.await();
// Wait until every task attempt is waiting for a container.
for ( Task t : job.getTasks().values()) {
mrApp.waitForInternalState((TaskAttemptImpl)t.getAttempts().values().iterator().next(),TaskAttemptStateInternal.UNASSIGNED);
}
amDispatcher.await();
// Allocate containers via the RM heartbeat cycle and get the tasks running.
allocator.schedule();
rmDispatcher.await();
amNodeManager.nodeHeartbeat(true);
rmDispatcher.await();
allocator.schedule();
rmDispatcher.await();
for ( Task t : job.getTasks().values()) {
mrApp.waitForState(t,TaskState.RUNNING);
}
// Tasks running but none finished: progress is the 0.05 AM base share.
allocator.schedule();
rmDispatcher.await();
Assert.assertEquals(0.05f,job.getProgress(),0.001f);
Assert.assertEquals(0.05f,rmApp.getProgress(),0.001f);
Iterator it=job.getTasks().values().iterator();
// 1 of 10 maps done: 0.05 + 0.9 * 1/10 = 0.14.
finishNextNTasks(rmDispatcher,amNodeManager,mrApp,it,1);
allocator.schedule();
rmDispatcher.await();
Assert.assertEquals(0.14f,job.getProgress(),0.001f);
Assert.assertEquals(0.14f,rmApp.getProgress(),0.001f);
// 6 of 10 maps done: 0.05 + 0.9 * 6/10 = 0.59.
finishNextNTasks(rmDispatcher,amNodeManager,mrApp,it,5);
allocator.schedule();
rmDispatcher.await();
Assert.assertEquals(0.59f,job.getProgress(),0.001f);
Assert.assertEquals(0.59f,rmApp.getProgress(),0.001f);
// 10 of 10 maps done: 0.05 + 0.9 * 10/10 = 0.95 (the last 0.05 is cleanup).
finishNextNTasks(rmDispatcher,amNodeManager,mrApp,it,4);
allocator.schedule();
rmDispatcher.await();
Assert.assertEquals(0.95f,job.getProgress(),0.001f);
Assert.assertEquals(0.95f,rmApp.getProgress(),0.001f);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies work-preserving RM restart handling: after the RM restarts, the
 * allocator receives a RESYNC response, re-registers against the new RM, and
 * resends all outstanding container requests, releases, and blacklist
 * additions so that every pending request is eventually satisfied.
 */
@Test public void testRMContainerAllocatorResendsRequestsOnRMRestart() throws Exception {
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
  conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
      YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
  conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
  conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, true);
  conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
  // -1 disables the "ignore blacklisting" threshold.
  conf.setInt(MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, -1);
  // Shared state store so the second RM can recover the first one's state.
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);

  // Phase 1: first RM.
  MyResourceManager rm1 = new MyResourceManager(conf, memStore);
  rm1.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm1.getRMContext().getDispatcher();
  RMApp app = rm1.submitApp(1024);
  dispatcher.await();
  MockNM nm1 = new MockNM("h1:1234", 15120, rm1.getResourceTrackerService());
  nm1.registerNode();
  nm1.nodeHeartbeat(true);
  dispatcher.await();
  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
  rm1.sendAMLaunched(appAttemptId);
  dispatcher.await();
  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  // Mocked job always reports RUNNING so the allocator keeps heartbeating.
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId, "job",
      "user", JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
  MyContainerAllocator allocator =
      new MyContainerAllocator(rm1, conf, appAttemptId, mockJob);
  // Two requests plus a failure that blacklists h2.
  ContainerRequestEvent event1 = createReq(jobId, 1, 1024, new String[]{"h1"});
  allocator.sendRequest(event1);
  ContainerRequestEvent event2 = createReq(jobId, 2, 2048, new String[]{"h1", "h2"});
  allocator.sendRequest(event2);
  ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h2", false);
  allocator.sendFailure(f1);
  List<TaskAttemptContainerAssignedEvent> assignedContainers = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assignedContainers.size());
  assertAsksAndReleases(3, 0, rm1);
  assertBlacklistAdditionsAndRemovals(1, 0, rm1);
  // h1 heartbeats; both requests are granted on h1.
  nm1.nodeHeartbeat(true);
  dispatcher.await();
  assignedContainers = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 2", 2, assignedContainers.size());
  assertAsksAndReleases(0, 0, rm1);
  assertBlacklistAdditionsAndRemovals(0, 0, rm1);
  // Satisfied requests produce updated (decremented) asks.
  assignedContainers = allocator.schedule();
  Assert.assertEquals("No of assignments must be 0", 0, assignedContainers.size());
  assertAsksAndReleases(3, 0, rm1);
  assertBlacklistAdditionsAndRemovals(0, 0, rm1);
  // Outstanding state carried over the restart: a new request plus a release.
  ContainerRequestEvent event3 = createReq(jobId, 3, 1000, new String[]{"h1"});
  allocator.sendRequest(event3);
  ContainerAllocatorEvent deallocate1 = createDeallocateEvent(jobId, 1, false);
  allocator.sendDeallocate(deallocate1);
  assignedContainers = allocator.schedule();
  Assert.assertEquals("No of assignments must be 0", 0, assignedContainers.size());
  assertAsksAndReleases(3, 1, rm1);
  assertBlacklistAdditionsAndRemovals(0, 0, rm1);

  // Phase 2: restart the RM from the same state store.
  MyResourceManager rm2 = new MyResourceManager(conf, memStore);
  rm2.start();
  nm1.setResourceTrackerService(rm2.getResourceTrackerService());
  allocator.updateSchedulerProxy(rm2);
  dispatcher = (DrainDispatcher) rm2.getRMContext().getDispatcher();
  // The old NM is told to resync with the recovered RM.
  NodeHeartbeatResponse hbResponse = nm1.nodeHeartbeat(true);
  Assert.assertEquals(NodeAction.RESYNC, hbResponse.getNodeAction());
  nm1 = new MockNM("h1:1234", 10240, rm2.getResourceTrackerService());
  nm1.registerNode();
  nm1.nodeHeartbeat(true);
  dispatcher.await();
  // More outstanding work issued before the allocator notices the restart.
  ContainerAllocatorEvent deallocate2 = createDeallocateEvent(jobId, 2, false);
  allocator.sendDeallocate(deallocate2);
  ContainerFailedEvent f2 = createFailEvent(jobId, 1, "h3", false);
  allocator.sendFailure(f2);
  ContainerRequestEvent event4 = createReq(jobId, 4, 2000, new String[]{"h1", "h2"});
  allocator.sendRequest(event4);
  // First allocate against rm2 gets the RESYNC command.
  allocator.schedule();
  dispatcher.await();
  Assert.assertTrue("Last allocate response is not RESYNC", allocator.isResyncCommand());
  ContainerRequestEvent event5 = createReq(jobId, 5, 3000, new String[]{"h1", "h2", "h3"});
  allocator.sendRequest(event5);
  // After re-registering, ALL outstanding asks, releases, and blacklist
  // additions must be resent to the new RM.
  assignedContainers = allocator.schedule();
  dispatcher.await();
  assertAsksAndReleases(3, 2, rm2);
  assertBlacklistAdditionsAndRemovals(2, 0, rm2);
  nm1.nodeHeartbeat(true);
  dispatcher.await();
  assignedContainers = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("Number of container should be 3", 3, assignedContainers.size());
  for (TaskAttemptContainerAssignedEvent assig : assignedContainers) {
    // assertEquals with an accurate message (the old assertTrue said
    // "Assigned count not correct" while actually checking the host).
    Assert.assertEquals("Assigned host not correct", "h1",
        assig.getContainer().getNodeId().getHost());
  }
  rm1.stop();
  rm2.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies map/reduce scheduling priorities and sizing: a failed map
 * (priority FAST_FAIL) and a normal map are requested alongside a reduce.
 * The reduce (event2, 3000MB on h1) cannot fit on h1's 1024MB, so only the
 * two maps are assigned — and neither on the undersized h1.
 */
@Test public void testMapReduceScheduling() throws Exception {
LOG.info("Running testMapReduceScheduling");
Configuration conf=new Configuration();
MyResourceManager rm=new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher();
// Submit the application and bring up the AM.
RMApp app=rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager=rm.registerNode("amNM:1234",2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0);
// Mocked job always reports RUNNING so the allocator keeps heartbeating.
Job mockJob=mock(Job.class);
when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
MyContainerAllocator allocator=new MyContainerAllocator(rm,conf,appAttemptId,mockJob);
// h1 has only 1GB, too small for either 2GB+ request.
MockNM nodeManager1=rm.registerNode("h1:1234",1024);
MockNM nodeManager2=rm.registerNode("h2:1234",10240);
MockNM nodeManager3=rm.registerNode("h3:1234",10240);
dispatcher.await();
// event1: failed map (earlyTerminated=true -> fast-fail priority).
ContainerRequestEvent event1=createReq(jobId,1,2048,new String[]{"h1","h2"},true,false);
allocator.sendRequest(event1);
// event2: reduce, 3000MB.
ContainerRequestEvent event2=createReq(jobId,2,3000,new String[]{"h1"},false,true);
allocator.sendRequest(event2);
// event3: normal map, 2048MB.
ContainerRequestEvent event3=createReq(jobId,3,2048,new String[]{"h3"},false,false);
allocator.sendRequest(event3);
// First allocate call only forwards the asks; nothing assigned yet.
List assigned=allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0",0,assigned.size());
nodeManager1.nodeHeartbeat(true);
nodeManager2.nodeHeartbeat(true);
nodeManager3.nodeHeartbeat(true);
dispatcher.await();
assigned=allocator.schedule();
dispatcher.await();
// Only the two maps are assigned; the reduce doesn't fit its preferred node.
checkAssignments(new ContainerRequestEvent[]{event1,event3},assigned,false);
// Nothing may land on h1 — its 1024MB is below both container sizes.
for ( TaskAttemptContainerAssignedEvent assig : assigned) {
Assert.assertFalse("Assigned count not correct","h1".equals(assig.getContainer().getNodeId().getHost()));
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies that direct requests to the MR AM web app are answered with a
// 302 redirect to the RM web proxy, for both the HTTP and HTTPS policies.
@Test public void testMRWebAppRedirection() throws Exception {
String[] schemePrefix={WebAppUtils.HTTP_PREFIX,WebAppUtils.HTTPS_PREFIX};
for ( String scheme : schemePrefix) {
// Use a real MRClientService so the AM web app actually binds a listener.
MRApp app=new MRApp(2,2,true,this.getClass().getName(),true){
@Override protected ClientService createClientService( AppContext context){
return new MRClientService(context);
}
}
;
Configuration conf=new Configuration();
// Point the proxy at a fake address and select the HTTP policy that
// matches the scheme under test.
conf.set(YarnConfiguration.PROXY_ADDRESS,"9.9.9.9");
conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,scheme.equals(WebAppUtils.HTTPS_PREFIX) ? Policy.HTTPS_ONLY.name() : Policy.HTTP_ONLY.name());
webProxyBase="/proxy/" + app.getAppID();
conf.set("hadoop.http.filter.initializers",TestAMFilterInitializer.class.getName());
Job job=app.submit(conf);
String hostPort=NetUtils.getHostPortString(((MRClientService)app.getClientService()).getWebApp().getListenerAddress());
// Hit the AM web app directly without following redirects; expect a
// 302 whose Location header points at the proxy URL for this app.
URL httpUrl=new URL("http://" + hostPort + "/mapreduce");
HttpURLConnection conn=(HttpURLConnection)httpUrl.openConnection();
conn.setInstanceFollowRedirects(false);
conn.connect();
String expectedURL=scheme + conf.get(YarnConfiguration.PROXY_ADDRESS) + ProxyUriUtils.getPath(app.getAppID(),"/mapreduce");
Assert.assertEquals(expectedURL,conn.getHeaderField(HttpHeaders.LOCATION));
Assert.assertEquals(HttpStatus.SC_MOVED_TEMPORARILY,conn.getResponseCode());
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testAppControllerIndex() {
  // Rendering the index page must stash the application id under APP_ID.
  AppContext appContext = new MockAppContext(0, 1, 1, 1);
  Injector mockInjector = WebAppTests.createMockInjector(AppContext.class, appContext);
  AppController appController = mockInjector.getInstance(AppController.class);
  appController.index();
  String expectedAppId = appContext.getApplicationID().toString();
  assertEquals(expectedAppId, appController.get(APP_ID, ""));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testTaskAttemptIdSlash() throws JSONException, Exception {
  // GET .../attempts/{attid}/ (trailing slash) with JSON Accept; verify payload.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      for (TaskAttempt att : task.getAttempts().values()) {
        TaskAttemptId attemptid = att.getID();
        String attid = MRApps.toString(attemptid);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("tasks").path(tid)
            .path("attempts").path(attid + "/")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject info = json.getJSONObject("taskAttempt");
        verifyAMTaskAttempt(info, att, task.getType());
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testTaskAttemptsSlash() throws JSONException, Exception {
  // GET .../tasks/{tid}/attempts/ (trailing slash) with JSON Accept.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
          .path("jobs").path(jobId).path("tasks").path(tid).path("attempts/")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      verifyAMTaskAttempts(json, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testTaskAttemptIdXML() throws JSONException, Exception {
  // GET .../attempts/{attid} with XML Accept; parse and verify each
  // <taskAttempt> element.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      for (TaskAttempt att : task.getAttempts().values()) {
        TaskAttemptId attemptid = att.getID();
        String attid = MRApps.toString(attemptid);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("tasks").path(tid)
            .path("attempts").path(attid)
            .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
        String xml = response.getEntity(String.class);
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        DocumentBuilder db = dbf.newDocumentBuilder();
        InputSource is = new InputSource();
        is.setCharacterStream(new StringReader(xml));
        Document dom = db.parse(is);
        NodeList nodes = dom.getElementsByTagName("taskAttempt");
        for (int i = 0; i < nodes.getLength(); i++) {
          Element element = (Element) nodes.item(i);
          verifyAMTaskAttemptXML(element, att, task.getType());
        }
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testTaskAttemptsXML() throws JSONException, Exception {
  // GET .../tasks/{tid}/attempts with XML Accept; exactly one
  // <taskAttempts> wrapper is expected per task.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
          .path("jobs").path(jobId).path("tasks").path(tid).path("attempts")
          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
      String xml = response.getEntity(String.class);
      DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
      DocumentBuilder db = dbf.newDocumentBuilder();
      InputSource is = new InputSource();
      is.setCharacterStream(new StringReader(xml));
      Document dom = db.parse(is);
      NodeList attempts = dom.getElementsByTagName("taskAttempts");
      assertEquals("incorrect number of elements", 1, attempts.getLength());
      NodeList nodes = dom.getElementsByTagName("taskAttempt");
      verifyAMTaskAttemptsXML(nodes, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testTaskAttempts() throws JSONException, Exception {
  // GET .../tasks/{tid}/attempts with an explicit JSON Accept header.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
          .path("jobs").path(jobId).path("tasks").path(tid).path("attempts")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      verifyAMTaskAttempts(json, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testTaskAttemptsDefault() throws JSONException, Exception {
  // GET .../tasks/{tid}/attempts with no Accept header; JSON must be the
  // default representation.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
          .path("jobs").path(jobId).path("tasks").path(tid).path("attempts")
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      verifyAMTaskAttempts(json, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testTaskAttemptIdCounters() throws JSONException, Exception {
  // GET .../attempts/{attid}/counters with JSON Accept; verify the
  // jobTaskAttemptCounters payload.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      for (TaskAttempt att : task.getAttempts().values()) {
        TaskAttemptId attemptid = att.getID();
        String attid = MRApps.toString(attemptid);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("tasks").path(tid)
            .path("attempts").path(attid).path("counters")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject info = json.getJSONObject("jobTaskAttemptCounters");
        verifyAMJobTaskAttemptCounters(info, att);
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testTaskAttemptIdXMLCounters() throws JSONException, Exception {
  // GET .../attempts/{attid}/counters with XML Accept; verify the
  // <jobTaskAttemptCounters> elements.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      for (TaskAttempt att : task.getAttempts().values()) {
        TaskAttemptId attemptid = att.getID();
        String attid = MRApps.toString(attemptid);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("tasks").path(tid)
            .path("attempts").path(attid).path("counters")
            .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
        String xml = response.getEntity(String.class);
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        DocumentBuilder db = dbf.newDocumentBuilder();
        InputSource is = new InputSource();
        is.setCharacterStream(new StringReader(xml));
        Document dom = db.parse(is);
        NodeList nodes = dom.getElementsByTagName("jobTaskAttemptCounters");
        verifyAMTaskCountersXML(nodes, att);
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testTaskAttemptId() throws JSONException, Exception {
  // GET .../attempts/{attid} with JSON Accept; verify the taskAttempt payload.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      for (TaskAttempt att : task.getAttempts().values()) {
        TaskAttemptId attemptid = att.getID();
        String attid = MRApps.toString(attemptid);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("tasks").path(tid)
            .path("attempts").path(attid)
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject info = json.getJSONObject("taskAttempt");
        verifyAMTaskAttempt(info, att, task.getType());
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testTaskAttemptIdDefault() throws JSONException, Exception {
  // GET .../attempts/{attid} with no Accept header; JSON must be the default.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      for (TaskAttempt att : task.getAttempts().values()) {
        TaskAttemptId attemptid = att.getID();
        String attid = MRApps.toString(attemptid);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("tasks").path(tid)
            .path("attempts").path(attid).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject info = json.getJSONObject("taskAttempt");
        verifyAMTaskAttempt(info, att, task.getType());
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testJobConfDefault() throws JSONException, Exception {
  // GET .../jobs/{jobid}/conf with no Accept header; JSON must be the default.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("conf").get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("conf");
    verifyAMJobConf(info, entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testJobConf() throws JSONException, Exception {
  // GET .../jobs/{jobid}/conf with JSON Accept; verify the conf payload.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("conf")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("conf");
    verifyAMJobConf(info, entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testJobConfSlash() throws JSONException, Exception {
  // GET .../jobs/{jobid}/conf/ (trailing slash) with JSON Accept.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("conf/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("conf");
    verifyAMJobConf(info, entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testJobConfXML() throws JSONException, Exception {
  // GET .../jobs/{jobid}/conf with XML Accept; verify the <conf> elements.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("conf")
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String xml = response.getEntity(String.class);
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom = db.parse(is);
    NodeList info = dom.getElementsByTagName("conf");
    verifyAMJobConfXML(info, entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testJobCountersDefault() throws JSONException, Exception {
  // GET .../jobs/{jobid}/counters/ with no Accept header; JSON is the default.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("counters/").get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobCounters");
    verifyAMJobCounters(info, entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testJobAttempts() throws JSONException, Exception {
  // GET .../jobs/{jobid}/jobattempts with JSON Accept; verify jobAttempts.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("jobattempts")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobAttempts");
    verifyJobAttempts(info, entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testJobIdSlash() throws JSONException, Exception {
  // GET .../jobs/{jobid}/ (trailing slash) with JSON Accept; verify job.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId + "/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("job");
    verifyAMJob(info, entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testJobCountersXML() throws Exception {
  // GET .../jobs/{jobid}/counters with XML Accept; verify <jobCounters>.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("counters")
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String xml = response.getEntity(String.class);
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom = db.parse(is);
    NodeList info = dom.getElementsByTagName("jobCounters");
    verifyAMJobCountersXML(info, entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testJobAttemptsDefault() throws JSONException, Exception {
  // GET .../jobs/{jobid}/jobattempts with no Accept header; JSON default.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("jobattempts").get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobAttempts");
    verifyJobAttempts(info, entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testJobsXML() throws Exception {
  // The jobs listing, requested as XML, must contain exactly one <jobs>
  // wrapper holding exactly one <job> entry.
  ClientResponse response = resource()
      .path("ws").path("v1").path("mapreduce").path("jobs")
      .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
  String body = response.getEntity(String.class);
  DocumentBuilder parser =
      DocumentBuilderFactory.newInstance().newDocumentBuilder();
  InputSource source = new InputSource();
  source.setCharacterStream(new StringReader(body));
  Document document = parser.parse(source);
  NodeList jobsNodes = document.getElementsByTagName("jobs");
  assertEquals("incorrect number of elements", 1, jobsNodes.getLength());
  NodeList jobNodes = document.getElementsByTagName("job");
  assertEquals("incorrect number of elements", 1, jobNodes.getLength());
  verifyAMJobXML(jobNodes, appContext);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testJobCounters() throws JSONException, Exception {
  // GET .../jobs/{jobid}/counters with JSON Accept; verify jobCounters.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("counters")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobCounters");
    verifyAMJobCounters(info, entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testJobIdDefault() throws JSONException, Exception {
  // GET .../jobs/{jobid} with no Accept header; JSON must be the default.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("job");
    verifyAMJob(info, entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testJobCountersSlash() throws JSONException, Exception {
  // GET .../jobs/{jobid}/counters/ (trailing slash) with JSON Accept.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("counters/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobCounters");
    verifyAMJobCounters(info, entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testJobAttemptsXML() throws Exception {
  // GET .../jobs/{jobid}/jobattempts with XML Accept; exactly one
  // <jobAttempts> wrapper is expected.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("jobattempts")
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String xml = response.getEntity(String.class);
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom = db.parse(is);
    NodeList attempts = dom.getElementsByTagName("jobAttempts");
    assertEquals("incorrect number of elements", 1, attempts.getLength());
    NodeList info = dom.getElementsByTagName("jobAttempt");
    verifyJobAttemptsXML(info, entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testJobId() throws JSONException, Exception {
  // GET .../jobs/{jobid} with JSON Accept; verify the job payload.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId)
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("job");
    verifyAMJob(info, entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testJobAttemptsSlash() throws JSONException, Exception {
  // GET .../jobs/{jobid}/jobattempts/ (trailing slash) with JSON Accept.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("jobattempts/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobAttempts");
    verifyJobAttempts(info, entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testJobIdXML() throws Exception {
  // GET .../jobs/{jobid} with XML Accept; verify the <job> elements.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  // Only the job ids are needed here; verification is against appContext.
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId)
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String xml = response.getEntity(String.class);
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom = db.parse(is);
    NodeList job = dom.getElementsByTagName("job");
    verifyAMJobXML(job, appContext);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testTaskIdCountersSlash() throws JSONException, Exception {
  // GET .../tasks/{tid}/counters/ (trailing slash) with JSON Accept.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
          .path("jobs").path(jobId).path("tasks").path(tid).path("counters/")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject info = json.getJSONObject("jobTaskCounters");
      verifyAMJobTaskCounters(info, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testTaskIdSlash() throws JSONException, Exception {
  // GET .../tasks/{tid}/ (trailing slash) with JSON Accept; verify task.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
          .path("jobs").path(jobId).path("tasks").path(tid + "/")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject info = json.getJSONObject("task");
      verifyAMSingleTask(info, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testTasksQueryMap() throws JSONException, Exception {
  // GET .../tasks?type=m with JSON Accept; only the single map task should
  // come back.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    String type = "m";
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("tasks").queryParam("type", type)
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject tasks = json.getJSONObject("tasks");
    JSONArray arr = tasks.getJSONArray("task");
    assertEquals("incorrect number of elements", 1, arr.length());
    verifyAMTask(arr, entry.getValue(), type);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testJobTaskCountersXML() throws Exception {
  // GET .../tasks/{tid}/counters with XML Accept; verify <jobTaskCounters>.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
          .path("jobs").path(jobId).path("tasks").path(tid).path("counters")
          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
      String xml = response.getEntity(String.class);
      DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
      DocumentBuilder db = dbf.newDocumentBuilder();
      InputSource is = new InputSource();
      is.setCharacterStream(new StringReader(xml));
      Document dom = db.parse(is);
      NodeList info = dom.getElementsByTagName("jobTaskCounters");
      verifyAMTaskCountersXML(info, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testTasksXML() throws JSONException, Exception {
  // GET .../jobs/{jobid}/tasks with XML Accept; exactly one <tasks>
  // wrapper is expected.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("tasks")
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String xml = response.getEntity(String.class);
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom = db.parse(is);
    NodeList tasks = dom.getElementsByTagName("tasks");
    assertEquals("incorrect number of elements", 1, tasks.getLength());
    NodeList task = dom.getElementsByTagName("task");
    verifyAMTaskXML(task, entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testTaskIdCounters() throws JSONException, Exception {
  // GET .../tasks/{tid}/counters with JSON Accept; verify jobTaskCounters.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
          .path("jobs").path(jobId).path("tasks").path(tid).path("counters")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject info = json.getJSONObject("jobTaskCounters");
      verifyAMJobTaskCounters(info, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testTasks() throws JSONException, Exception {
  // GET .../jobs/{jobid}/tasks with JSON Accept; both tasks (map and
  // reduce) should be listed.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("tasks")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject tasks = json.getJSONObject("tasks");
    JSONArray arr = tasks.getJSONArray("task");
    assertEquals("incorrect number of elements", 2, arr.length());
    verifyAMTask(arr, entry.getValue(), null);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testTasksSlash() throws JSONException, Exception {
  // GET .../jobs/{jobid}/tasks/ (trailing slash) with JSON Accept.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("tasks/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject tasks = json.getJSONObject("tasks");
    JSONArray arr = tasks.getJSONArray("task");
    assertEquals("incorrect number of elements", 2, arr.length());
    verifyAMTask(arr, entry.getValue(), null);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testTasksDefault() throws JSONException, Exception {
  // GET .../jobs/{jobid}/tasks with no Accept header; JSON is the default.
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    String jobId = MRApps.toString(entry.getKey());
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("tasks").get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject tasks = json.getJSONObject("tasks");
    JSONArray arr = tasks.getJSONArray("task");
    assertEquals("incorrect number of elements", 2, arr.length());
    verifyAMTask(arr, entry.getValue(), null);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid} (explicit JSON Accept)
 * must return a single "task" object for every task of every job.
 */
@Test public void testTaskId() throws JSONException, Exception {
  final WebResource webResource=resource();
  final Map<JobId,Job> allJobs=appContext.getAllJobs();
  for (Map.Entry<JobId,Job> entry : allJobs.entrySet()) {
    final String jobIdText=MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      final String taskIdText=MRApps.toString(task.getID());
      final ClientResponse response=webResource.path("ws").path("v1")
          .path("mapreduce").path("jobs").path(jobIdText)
          .path("tasks").path(taskIdText)
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
      final JSONObject body=response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements",1,body.length());
      verifyAMSingleTask(body.getJSONObject("task"),task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid} with no Accept header must
 * default to JSON and return a single "task" object per task.
 */
@Test public void testTaskIdDefault() throws JSONException, Exception {
  final WebResource webResource=resource();
  final Map<JobId,Job> allJobs=appContext.getAllJobs();
  for (Map.Entry<JobId,Job> entry : allJobs.entrySet()) {
    final String jobIdText=MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      final String taskIdText=MRApps.toString(task.getID());
      final ClientResponse response=webResource.path("ws").path("v1")
          .path("mapreduce").path("jobs").path(jobIdText)
          .path("tasks").path(taskIdText).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
      final JSONObject body=response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements",1,body.length());
      verifyAMSingleTask(body.getJSONObject("task"),task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}/counters with no Accept
 * header must default to JSON and return a "jobTaskCounters" object per task.
 */
@Test public void testTaskIdCountersDefault() throws JSONException, Exception {
  final WebResource webResource=resource();
  final Map<JobId,Job> allJobs=appContext.getAllJobs();
  for (Map.Entry<JobId,Job> entry : allJobs.entrySet()) {
    final String jobIdText=MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      final String taskIdText=MRApps.toString(task.getID());
      final ClientResponse response=webResource.path("ws").path("v1")
          .path("mapreduce").path("jobs").path(jobIdText)
          .path("tasks").path(taskIdText).path("counters")
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
      final JSONObject body=response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements",1,body.length());
      verifyAMJobTaskCounters(body.getJSONObject("jobTaskCounters"),task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET ws/v1/mapreduce/jobs/{jobid}/tasks?type=r must filter to reduce tasks:
 * exactly one task element per job here, verified against the job with the
 * same type filter.
 */
@Test public void testTasksQueryReduce() throws JSONException, Exception {
  final WebResource webResource=resource();
  final Map<JobId,Job> allJobs=appContext.getAllJobs();
  final String type="r";
  for (Map.Entry<JobId,Job> entry : allJobs.entrySet()) {
    final String jobIdText=MRApps.toString(entry.getKey());
    final ClientResponse response=webResource.path("ws").path("v1")
        .path("mapreduce").path("jobs").path(jobIdText).path("tasks")
        .queryParam("type",type)
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
    final JSONObject body=response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements",1,body.length());
    final JSONArray taskArray=body.getJSONObject("tasks").getJSONArray("task");
    assertEquals("incorrect number of elements",1,taskArray.length());
    verifyAMTask(taskArray,entry.getValue(),type);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid} with an XML Accept header
 * must return XML; every "task" element in the response is verified against
 * the corresponding task.
 */
@Test public void testTaskIdXML() throws JSONException, Exception {
  final WebResource webResource=resource();
  final Map<JobId,Job> allJobs=appContext.getAllJobs();
  for (Map.Entry<JobId,Job> entry : allJobs.entrySet()) {
    final String jobIdText=MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      final String taskIdText=MRApps.toString(task.getID());
      final ClientResponse response=webResource.path("ws").path("v1")
          .path("mapreduce").path("jobs").path(jobIdText)
          .path("tasks").path(taskIdText)
          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
      final String xml=response.getEntity(String.class);
      final DocumentBuilder builder=
          DocumentBuilderFactory.newInstance().newDocumentBuilder();
      final Document document=
          builder.parse(new InputSource(new StringReader(xml)));
      final NodeList taskNodes=document.getElementsByTagName("task");
      for (int idx=0; idx < taskNodes.getLength(); idx++) {
        verifyAMSingleTaskXML((Element)taskNodes.item(idx),task);
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test the method 'info': after {@code info()} runs, the controller's
 * ResponseInfo must contain the expected key/value pairs in insertion order.
 */
@Test public void testInfo(){
  appController.info();
  final Iterator<ResponseInfo.Item> items=
      appController.getResponseInfo().iterator();
  ResponseInfo.Item current=items.next();
  assertEquals("Application ID:",current.key);
  assertEquals("application_0_0000",current.value);
  current=items.next();
  assertEquals("Application Name:",current.key);
  assertEquals("AppName",current.value);
  current=items.next();
  assertEquals("User:",current.key);
  assertEquals("User",current.value);
  // The start-time value is not asserted, only its label.
  current=items.next();
  assertEquals("Started on:",current.key);
  // "Elasped: " intentionally matches the (misspelled) label emitted by the
  // controller under test.
  current=items.next();
  assertEquals("Elasped: ",current.key);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises persistence and recovery of the JHS delegation token secret
 * manager: tokens and master keys created by one manager instance must be
 * reloadable by a fresh instance from the same state store, cancellations
 * must survive recovery, and sequence numbers must continue where they
 * left off. The statement order is significant throughout.
 */
@Test public void testRecovery() throws IOException {
Configuration conf=new Configuration();
// In-memory store: state survives manager restarts within this test only.
HistoryServerStateStoreService store=new HistoryServerMemStateStoreService();
store.init(conf);
store.start();
JHSDelegationTokenSecretManagerForTest mgr=new JHSDelegationTokenSecretManagerForTest(store);
mgr.startThreads();
// Create two tokens and capture the keys/renew dates to compare after recovery.
MRDelegationTokenIdentifier tokenId1=new MRDelegationTokenIdentifier(new Text("tokenOwner"),new Text("tokenRenewer"),new Text("tokenUser"));
Token token1=new Token(tokenId1,mgr);
MRDelegationTokenIdentifier tokenId2=new MRDelegationTokenIdentifier(new Text("tokenOwner"),new Text("tokenRenewer"),new Text("tokenUser"));
Token token2=new Token(tokenId2,mgr);
DelegationKey[] keys=mgr.getAllKeys();
long tokenRenewDate1=mgr.getAllTokens().get(tokenId1).getRenewDate();
long tokenRenewDate2=mgr.getAllTokens().get(tokenId2).getRenewDate();
mgr.stopThreads();
// First recovery: a new manager loads state from the store.
mgr=new JHSDelegationTokenSecretManagerForTest(store);
mgr.recover(store.loadState());
List recoveredKeys=Arrays.asList(mgr.getAllKeys());
for ( DelegationKey key : keys) {
assertTrue("key missing after recovery",recoveredKeys.contains(key));
}
// Both tokens must be present with unchanged renew dates.
assertTrue("token1 missing",mgr.getAllTokens().containsKey(tokenId1));
assertEquals("token1 renew date",tokenRenewDate1,mgr.getAllTokens().get(tokenId1).getRenewDate());
assertTrue("token2 missing",mgr.getAllTokens().containsKey(tokenId2));
assertEquals("token2 renew date",tokenRenewDate2,mgr.getAllTokens().get(tokenId2).getRenewDate());
mgr.startThreads();
// Recovered tokens must still verify with their original passwords.
mgr.verifyToken(tokenId1,token1.getPassword());
mgr.verifyToken(tokenId2,token2.getPassword());
// A token minted after recovery must continue the sequence numbering.
MRDelegationTokenIdentifier tokenId3=new MRDelegationTokenIdentifier(new Text("tokenOwner"),new Text("tokenRenewer"),new Text("tokenUser"));
Token token3=new Token(tokenId3,mgr);
assertEquals("sequence number restore",tokenId2.getSequenceNumber() + 1,tokenId3.getSequenceNumber());
mgr.cancelToken(token1,"tokenOwner");
// Token owned by a full Kerberos principal: cancelling with only the short
// name must be rejected, cancelling with the full owner name must succeed.
MRDelegationTokenIdentifier tokenIdFull=new MRDelegationTokenIdentifier(new Text("tokenOwner/localhost@LOCALHOST"),new Text("tokenRenewer"),new Text("tokenUser"));
KerberosName.setRules("RULE:[1:$1]\nRULE:[2:$1]");
Token tokenFull=new Token(tokenIdFull,mgr);
try {
mgr.cancelToken(tokenFull,"tokenOwner");
}
catch ( AccessControlException ace) {
assertTrue(ace.getMessage().contains("is not authorized to cancel the token"));
}
mgr.cancelToken(tokenFull,tokenIdFull.getOwner().toString());
long tokenRenewDate3=mgr.getAllTokens().get(tokenId3).getRenewDate();
mgr.stopThreads();
// Second recovery: the cancelled token1 must be gone, token2/token3 intact.
mgr=new JHSDelegationTokenSecretManagerForTest(store);
mgr.recover(store.loadState());
assertFalse("token1 should be missing",mgr.getAllTokens().containsKey(tokenId1));
assertTrue("token2 missing",mgr.getAllTokens().containsKey(tokenId2));
assertEquals("token2 renew date",tokenRenewDate2,mgr.getAllTokens().get(tokenId2).getRenewDate());
assertTrue("token3 missing",mgr.getAllTokens().containsKey(tokenId3));
assertEquals("token3 renew date",tokenRenewDate3,mgr.getAllTokens().get(tokenId3).getRenewDate());
mgr.startThreads();
mgr.verifyToken(tokenId2,token2.getPassword());
mgr.verifyToken(tokenId3,token3.getPassword());
mgr.stopThreads();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Loads a completed job from history and checks the task inventory:
 * 10 map / 2 reduce tasks, with the first map and first reduce task both
 * SUCCEEDED (one attempt each) and reports consistent with their ids.
 */
@Test(timeout=10000) public void testCompletedTask() throws Exception {
  final HistoryFileInfo info=mock(HistoryFileInfo.class);
  when(info.getConfFile()).thenReturn(fullConfPath);
  completedJob=new CompletedJob(conf,jobId,fulleHistoryPath,loadTasks,"user",info,jobAclsManager);
  final TaskId mapTaskId=MRBuilderUtils.newTaskId(jobId,0,TaskType.MAP);
  final TaskId reduceTaskId=MRBuilderUtils.newTaskId(jobId,0,TaskType.REDUCE);
  final Map<TaskId,Task> mapTasks=completedJob.getTasks(TaskType.MAP);
  final Map<TaskId,Task> reduceTasks=completedJob.getTasks(TaskType.REDUCE);
  assertEquals(10,mapTasks.size());
  assertEquals(2,reduceTasks.size());
  // First map task: succeeded with a single attempt and a matching report.
  final Task mapTask=mapTasks.get(mapTaskId);
  assertEquals(1,mapTask.getAttempts().size());
  assertEquals(TaskState.SUCCEEDED,mapTask.getState());
  final TaskReport mapReport=mapTask.getReport();
  assertEquals(TaskState.SUCCEEDED,mapReport.getTaskState());
  assertEquals(mapTaskId,mapReport.getTaskId());
  // First reduce task: same expectations.
  final Task reduceTask=reduceTasks.get(reduceTaskId);
  assertEquals(1,reduceTask.getAttempts().size());
  assertEquals(TaskState.SUCCEEDED,reduceTask.getState());
  final TaskReport reduceReport=reduceTask.getReport();
  assertEquals(TaskState.SUCCEEDED,reduceReport.getTaskState());
  assertEquals(reduceTaskId,reduceReport.getTaskId());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Loads a completed job from history and checks the first attempt of the
 * first map and first reduce task: both SUCCEEDED, assigned to
 * localhost:45454 with HTTP address localhost:9999, and their reports carry
 * the same node manager host/ports.
 */
@Test(timeout=10000) public void testCompletedTaskAttempt() throws Exception {
  final HistoryFileInfo info=mock(HistoryFileInfo.class);
  when(info.getConfFile()).thenReturn(fullConfPath);
  completedJob=new CompletedJob(conf,jobId,fulleHistoryPath,loadTasks,"user",info,jobAclsManager);
  final TaskId mapTaskId=MRBuilderUtils.newTaskId(jobId,0,TaskType.MAP);
  final TaskId reduceTaskId=MRBuilderUtils.newTaskId(jobId,0,TaskType.REDUCE);
  final TaskAttemptId mapAttemptId=MRBuilderUtils.newTaskAttemptId(mapTaskId,0);
  final TaskAttemptId reduceAttemptId=MRBuilderUtils.newTaskAttemptId(reduceTaskId,0);
  final Task mapTask=completedJob.getTask(mapTaskId);
  final Task reduceTask=completedJob.getTask(reduceTaskId);
  // Map attempt checks.
  final TaskAttempt mapAttempt=mapTask.getAttempt(mapAttemptId);
  assertEquals(TaskAttemptState.SUCCEEDED,mapAttempt.getState());
  assertEquals("localhost:45454",mapAttempt.getAssignedContainerMgrAddress());
  assertEquals("localhost:9999",mapAttempt.getNodeHttpAddress());
  final TaskAttemptReport mapAttemptReport=mapAttempt.getReport();
  assertEquals(TaskAttemptState.SUCCEEDED,mapAttemptReport.getTaskAttemptState());
  assertEquals("localhost",mapAttemptReport.getNodeManagerHost());
  assertEquals(45454,mapAttemptReport.getNodeManagerPort());
  assertEquals(9999,mapAttemptReport.getNodeManagerHttpPort());
  // Reduce attempt checks mirror the map attempt's.
  final TaskAttempt reduceAttempt=reduceTask.getAttempt(reduceAttemptId);
  assertEquals(TaskAttemptState.SUCCEEDED,reduceAttempt.getState());
  assertEquals("localhost:45454",reduceAttempt.getAssignedContainerMgrAddress());
  assertEquals("localhost:9999",reduceAttempt.getNodeHttpAddress());
  final TaskAttemptReport reduceAttemptReport=reduceAttempt.getReport();
  assertEquals(TaskAttemptState.SUCCEEDED,reduceAttemptReport.getTaskAttemptState());
  assertEquals("localhost",reduceAttemptReport.getNodeManagerHost());
  assertEquals(45454,reduceAttemptReport.getNodeManagerPort());
  assertEquals(9999,reduceAttemptReport.getNodeManagerHttpPort());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Runs an MR app whose task fails, parses the resulting job history file,
 * and checks that every completed task report still carries non-null
 * counters and that the job's diagnostics made it into the history error
 * info.
 *
 * Fixes over the previous version: the rethrown "Can not open History File"
 * exception now preserves the original IOException as its cause, and the
 * assertNull failure message correctly says "unexpected".
 */
@Test(timeout=60000) public void testCountersForFailedTask() throws Exception {
LOG.info("STARTING testCountersForFailedTask");
try {
Configuration conf=new Configuration();
conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class);
RackResolver.init(conf);
// App configured so the job ends in FAILED state.
MRApp app=new MRAppWithHistoryWithFailedTask(2,1,true,this.getClass().getName(),true);
app.submit(conf);
Job job=app.getContext().getAllJobs().values().iterator().next();
JobId jobId=job.getID();
app.waitForState(job,JobState.FAILED);
app.waitForState(Service.STATE.STOPPED);
JobHistory jobHistory=new JobHistory();
jobHistory.init(conf);
HistoryFileInfo fileInfo=jobHistory.getJobFileInfo(jobId);
JobHistoryParser parser;
JobInfo jobInfo;
// Synchronize on fileInfo while reading so the history file is stable.
synchronized (fileInfo) {
Path historyFilePath=fileInfo.getHistoryFile();
FSDataInputStream in=null;
FileContext fc=null;
try {
fc=FileContext.getFileContext(conf);
in=fc.open(fc.makeQualified(historyFilePath));
}
catch ( IOException ioe) {
LOG.info("Can not open history file: " + historyFilePath,ioe);
// Preserve the original failure as the cause instead of dropping it.
throw new Exception("Can not open History File",ioe);
}
parser=new JobHistoryParser(in);
jobInfo=parser.parse();
}
Exception parseException=parser.getParseException();
Assert.assertNull("Caught an unexpected exception " + parseException,parseException);
// Every task in the history must yield a report with non-null counters.
for ( Map.Entry entry : jobInfo.getAllTasks().entrySet()) {
TaskId yarnTaskID=TypeConverter.toYarn(entry.getKey());
CompletedTask ct=new CompletedTask(yarnTaskID,entry.getValue());
Assert.assertNotNull("completed task report has null counters",ct.getReport().getCounters());
}
// The job's live diagnostics must all appear in the persisted error info.
final List originalDiagnostics=job.getDiagnostics();
final String historyError=jobInfo.getErrorInfo();
assertTrue("No original diagnostics for a failed job",originalDiagnostics != null && !originalDiagnostics.isEmpty());
assertNotNull("No history error info for a failed job ",historyError);
for ( String diagString : originalDiagnostics) {
assertTrue(historyError.contains(diagString));
}
}
finally {
LOG.info("FINISHED testCountersForFailedTask");
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Runs an MR app with failing first attempts, parses the resulting history
 * file, and checks that each attempt recorded the expected rack name and
 * that exactly 2 attempts were recorded as FAILED.
 *
 * Fixes over the previous version: the rack-name assertEquals now passes
 * expected (RACK_NAME) before actual so failure messages read correctly;
 * the rethrown "Can not open History File" exception preserves the original
 * IOException as its cause; the assertNull failure message correctly says
 * "unexpected"; and the final failure message says "attempts" since that is
 * what the counter tallies.
 */
@Test(timeout=30000) public void testHistoryParsingForFailedAttempts() throws Exception {
LOG.info("STARTING testHistoryParsingForFailedAttempts");
try {
Configuration conf=new Configuration();
conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class);
RackResolver.init(conf);
// App whose first attempts fail but whose job still succeeds.
MRApp app=new MRAppWithHistoryWithFailedAttempt(2,1,true,this.getClass().getName(),true);
app.submit(conf);
Job job=app.getContext().getAllJobs().values().iterator().next();
JobId jobId=job.getID();
app.waitForState(job,JobState.SUCCEEDED);
app.waitForState(Service.STATE.STOPPED);
JobHistory jobHistory=new JobHistory();
jobHistory.init(conf);
HistoryFileInfo fileInfo=jobHistory.getJobFileInfo(jobId);
JobHistoryParser parser;
JobInfo jobInfo;
// Synchronize on fileInfo while reading so the history file is stable.
synchronized (fileInfo) {
Path historyFilePath=fileInfo.getHistoryFile();
FSDataInputStream in=null;
FileContext fc=null;
try {
fc=FileContext.getFileContext(conf);
in=fc.open(fc.makeQualified(historyFilePath));
}
catch ( IOException ioe) {
LOG.info("Can not open history file: " + historyFilePath,ioe);
// Preserve the original failure as the cause instead of dropping it.
throw new Exception("Can not open History File",ioe);
}
parser=new JobHistoryParser(in);
jobInfo=parser.parse();
}
Exception parseException=parser.getParseException();
Assert.assertNull("Caught an unexpected exception " + parseException,parseException);
int noOffailedAttempts=0;
Map allTasks=jobInfo.getAllTasks();
for ( Task task : job.getTasks().values()) {
TaskInfo taskInfo=allTasks.get(TypeConverter.fromYarn(task.getID()));
for ( TaskAttempt taskAttempt : task.getAttempts().values()) {
TaskAttemptInfo taskAttemptInfo=taskInfo.getAllTaskAttempts().get(TypeConverter.fromYarn((taskAttempt.getID())));
// Expected value first, actual second.
Assert.assertEquals("rack-name is incorrect",RACK_NAME,taskAttemptInfo.getRackname());
if (taskAttemptInfo.getTaskStatus().equals("FAILED")) {
noOffailedAttempts++;
}
}
}
Assert.assertEquals("No of failed attempts doesn't match.",2,noOffailedAttempts);
}
finally {
LOG.info("FINISHED testHistoryParsingForFailedAttempts");
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Before -refreshUserToGroupsMappings, repeated lookups of the current
 * user's groups must match; after running the admin refresh command, the
 * mapping service must produce different groups.
 */
@Test public void testRefreshUserToGroupsMappings() throws Exception {
  final String[] args={"-refreshUserToGroupsMappings"};
  final Groups groups=Groups.getUserToGroupsMappingService(conf);
  final String user=UserGroupInformation.getCurrentUser().getUserName();
  System.out.println("first attempt:");
  final List<String> g1=groups.getGroups(user);
  // Shared buffer reused for printing each lookup, as in the original test.
  final String[] str_groups=new String[g1.size()];
  g1.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  System.out.println("second attempt, should be same:");
  final List<String> g2=groups.getGroups(user);
  g2.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  for (int idx=0; idx < g2.size(); idx++) {
    assertEquals("Should be same group ",g1.get(idx),g2.get(idx));
  }
  hsAdminClient.run(args);
  System.out.println("third attempt(after refresh command), should be different:");
  final List<String> g3=groups.getGroups(user);
  g3.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  for (int idx=0; idx < g3.size(); idx++) {
    final String before=g1.get(idx);
    final String after=g3.get(idx);
    assertFalse("Should be different group: " + before + " and "+ after,
        before.equals(after));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * test HsController: verifies the page classes it exposes and that each
 * action method routes to the expected page class (recorded by the
 * HsControllerForTest subclass via getClazz()). The calls below mutate
 * controller state in sequence, so statement order matters.
 */
@Test public void testHsController() throws Exception {
// Mocked app context with a fixed application id.
AppContext ctx=mock(AppContext.class);
ApplicationId appId=ApplicationIdPBImpl.newInstance(0,5);
when(ctx.getApplicationID()).thenReturn(appId);
AppForTest app=new AppForTest(ctx);
Configuration config=new Configuration();
RequestContext requestCtx=mock(RequestContext.class);
HsControllerForTest controller=new HsControllerForTest(app,config,requestCtx);
// index() must set the page title.
controller.index();
assertEquals("JobHistory",controller.get(Params.TITLE,""));
// Static page-class accessors.
assertEquals(HsJobPage.class,controller.jobPage());
assertEquals(HsCountersPage.class,controller.countersPage());
assertEquals(HsTasksPage.class,controller.tasksPage());
assertEquals(HsTaskPage.class,controller.taskPage());
assertEquals(HsAttemptsPage.class,controller.attemptsPage());
// Seed request parameters required by the action methods below.
controller.set(AMParams.JOB_ID,"job_01_01");
controller.set(AMParams.TASK_ID,"task_01_01_m01_01");
controller.set(AMParams.TASK_TYPE,"m");
controller.set(AMParams.ATTEMPT_STATE,"State");
// Mock a job (with access granted) resolvable from the seeded job id.
Job job=mock(Job.class);
Task task=mock(Task.class);
when(job.getTask(any(TaskId.class))).thenReturn(task);
JobId jobID=MRApps.toJobID("job_01_01");
when(ctx.getJob(jobID)).thenReturn(job);
when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(true);
// Each action must render its matching page class.
controller.job();
assertEquals(HsJobPage.class,controller.getClazz());
controller.jobCounters();
assertEquals(HsCountersPage.class,controller.getClazz());
controller.taskCounters();
assertEquals(HsCountersPage.class,controller.getClazz());
controller.tasks();
assertEquals(HsTasksPage.class,controller.getClazz());
controller.task();
assertEquals(HsTaskPage.class,controller.getClazz());
controller.attempts();
assertEquals(HsAttemptsPage.class,controller.getClazz());
assertEquals(HsConfPage.class,controller.confPage());
assertEquals(HsAboutPage.class,controller.aboutPage());
controller.about();
assertEquals(HsAboutPage.class,controller.getClazz());
controller.logs();
assertEquals(HsLogsPage.class,controller.getClazz());
controller.nmlogs();
assertEquals(AggregatedLogsPage.class,controller.getClazz());
assertEquals(HsSingleCounterPage.class,controller.singleCounterPage());
controller.singleJobCounter();
assertEquals(HsSingleCounterPage.class,controller.getClazz());
controller.singleTaskCounter();
assertEquals(HsSingleCounterPage.class,controller.getClazz());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * index() on an injected HsController must store the mock context's
 * application id under APP_ID.
 */
@Test public void testAppControllerIndex(){
  final MockAppContext context=new MockAppContext(0,1,1,1);
  final Injector injector=
      WebAppTests.createMockInjector(AppContext.class,context);
  final HsController controller=injector.getInstance(HsController.class);
  controller.index();
  final String expectedAppId=context.getApplicationID().toString();
  assertEquals(expectedAppId,controller.get(APP_ID,""));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET ws/v1/history/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/
 * (trailing slash) must answer with JSON describing the task's attempts.
 */
@Test public void testTaskAttemptsSlash() throws JSONException, Exception {
  final WebResource webResource=resource();
  final Map<JobId,Job> allJobs=appContext.getAllJobs();
  for (Map.Entry<JobId,Job> entry : allJobs.entrySet()) {
    final String jobIdText=MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      final String taskIdText=MRApps.toString(task.getID());
      final ClientResponse response=webResource.path("ws").path("v1")
          .path("history").path("mapreduce").path("jobs").path(jobIdText)
          .path("tasks").path(taskIdText).path("attempts/")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
      verifyHsTaskAttempts(response.getEntity(JSONObject.class),task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks/{taskid}/attempts/{attemptid}/counters with an XML Accept
 * header must return XML whose "jobTaskAttemptCounters" elements match the
 * attempt's counters.
 */
@Test public void testTaskAttemptIdXMLCounters() throws JSONException, Exception {
  final WebResource webResource=resource();
  final Map<JobId,Job> allJobs=appContext.getAllJobs();
  for (Map.Entry<JobId,Job> entry : allJobs.entrySet()) {
    final String jobIdText=MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      final String taskIdText=MRApps.toString(task.getID());
      for (TaskAttempt attempt : task.getAttempts().values()) {
        final String attemptIdText=MRApps.toString(attempt.getID());
        final ClientResponse response=webResource.path("ws").path("v1")
            .path("history").path("mapreduce").path("jobs").path(jobIdText)
            .path("tasks").path(taskIdText)
            .path("attempts").path(attemptIdText).path("counters")
            .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
        final String xml=response.getEntity(String.class);
        final DocumentBuilder builder=
            DocumentBuilderFactory.newInstance().newDocumentBuilder();
        final Document document=
            builder.parse(new InputSource(new StringReader(xml)));
        final NodeList counterNodes=
            document.getElementsByTagName("jobTaskAttemptCounters");
        verifyHsTaskCountersXML(counterNodes,attempt);
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks/{taskid}/attempts/{attemptid} with no Accept header must
 * default to JSON and return a single "taskAttempt" object per attempt.
 */
@Test public void testTaskAttemptIdDefault() throws JSONException, Exception {
  final WebResource webResource=resource();
  final Map<JobId,Job> allJobs=appContext.getAllJobs();
  for (Map.Entry<JobId,Job> entry : allJobs.entrySet()) {
    final String jobIdText=MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      final String taskIdText=MRApps.toString(task.getID());
      for (TaskAttempt attempt : task.getAttempts().values()) {
        final String attemptIdText=MRApps.toString(attempt.getID());
        final ClientResponse response=webResource.path("ws").path("v1")
            .path("history").path("mapreduce").path("jobs").path(jobIdText)
            .path("tasks").path(taskIdText)
            .path("attempts").path(attemptIdText).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
        final JSONObject body=response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements",1,body.length());
        verifyHsTaskAttempt(body.getJSONObject("taskAttempt"),attempt,task.getType());
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks/{taskid}/attempts with no Accept header must default to
 * JSON describing the task's attempts.
 */
@Test public void testTaskAttemptsDefault() throws JSONException, Exception {
  final WebResource webResource=resource();
  final Map<JobId,Job> allJobs=appContext.getAllJobs();
  for (Map.Entry<JobId,Job> entry : allJobs.entrySet()) {
    final String jobIdText=MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      final String taskIdText=MRApps.toString(task.getID());
      final ClientResponse response=webResource.path("ws").path("v1")
          .path("history").path("mapreduce").path("jobs").path(jobIdText)
          .path("tasks").path(taskIdText).path("attempts")
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
      verifyHsTaskAttempts(response.getEntity(JSONObject.class),task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks/{taskid}/attempts with an XML Accept header must return one
 * "taskAttempts" wrapper whose "taskAttempt" children match the task's
 * attempts.
 */
@Test public void testTaskAttemptsXML() throws JSONException, Exception {
  final WebResource webResource=resource();
  final Map<JobId,Job> allJobs=appContext.getAllJobs();
  for (Map.Entry<JobId,Job> entry : allJobs.entrySet()) {
    final String jobIdText=MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      final String taskIdText=MRApps.toString(task.getID());
      final ClientResponse response=webResource.path("ws").path("v1")
          .path("history").path("mapreduce").path("jobs").path(jobIdText)
          .path("tasks").path(taskIdText).path("attempts")
          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
      final String xml=response.getEntity(String.class);
      final DocumentBuilder builder=
          DocumentBuilderFactory.newInstance().newDocumentBuilder();
      final Document document=
          builder.parse(new InputSource(new StringReader(xml)));
      final NodeList wrappers=document.getElementsByTagName("taskAttempts");
      assertEquals("incorrect number of elements",1,wrappers.getLength());
      final NodeList attemptNodes=document.getElementsByTagName("taskAttempt");
      verifyHsTaskAttemptsXML(attemptNodes,task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks/{taskid}/attempts/{attemptid}/counters (JSON Accept) must
 * return a single "jobTaskAttemptCounters" object per attempt.
 */
@Test public void testTaskAttemptIdCounters() throws JSONException, Exception {
  final WebResource webResource=resource();
  final Map<JobId,Job> allJobs=appContext.getAllJobs();
  for (Map.Entry<JobId,Job> entry : allJobs.entrySet()) {
    final String jobIdText=MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      final String taskIdText=MRApps.toString(task.getID());
      for (TaskAttempt attempt : task.getAttempts().values()) {
        final String attemptIdText=MRApps.toString(attempt.getID());
        final ClientResponse response=webResource.path("ws").path("v1")
            .path("history").path("mapreduce").path("jobs").path(jobIdText)
            .path("tasks").path(taskIdText)
            .path("attempts").path(attemptIdText).path("counters")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
        final JSONObject body=response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements",1,body.length());
        verifyHsJobTaskAttemptCounters(
            body.getJSONObject("jobTaskAttemptCounters"),attempt);
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks/{taskid}/attempts (explicit JSON Accept) must answer with
 * JSON describing the task's attempts.
 */
@Test public void testTaskAttempts() throws JSONException, Exception {
  final WebResource webResource=resource();
  final Map<JobId,Job> allJobs=appContext.getAllJobs();
  for (Map.Entry<JobId,Job> entry : allJobs.entrySet()) {
    final String jobIdText=MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      final String taskIdText=MRApps.toString(task.getID());
      final ClientResponse response=webResource.path("ws").path("v1")
          .path("history").path("mapreduce").path("jobs").path(jobIdText)
          .path("tasks").path(taskIdText).path("attempts")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
      verifyHsTaskAttempts(response.getEntity(JSONObject.class),task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks/{taskid}/attempts/{attemptid} (explicit JSON Accept) must
 * return a single "taskAttempt" object per attempt.
 */
@Test public void testTaskAttemptId() throws JSONException, Exception {
  final WebResource webResource=resource();
  final Map<JobId,Job> allJobs=appContext.getAllJobs();
  for (Map.Entry<JobId,Job> entry : allJobs.entrySet()) {
    final String jobIdText=MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      final String taskIdText=MRApps.toString(task.getID());
      for (TaskAttempt attempt : task.getAttempts().values()) {
        final String attemptIdText=MRApps.toString(attempt.getID());
        final ClientResponse response=webResource.path("ws").path("v1")
            .path("history").path("mapreduce").path("jobs").path(jobIdText)
            .path("tasks").path(taskIdText)
            .path("attempts").path(attemptIdText)
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
        final JSONObject body=response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements",1,body.length());
        verifyHsTaskAttempt(body.getJSONObject("taskAttempt"),attempt,task.getType());
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks/{taskid}/attempts/{attemptid} with an XML Accept header
 * must return XML; every "taskAttempt" element is verified against the
 * attempt.
 */
@Test public void testTaskAttemptIdXML() throws JSONException, Exception {
  final WebResource webResource=resource();
  final Map<JobId,Job> allJobs=appContext.getAllJobs();
  for (Map.Entry<JobId,Job> entry : allJobs.entrySet()) {
    final String jobIdText=MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      final String taskIdText=MRApps.toString(task.getID());
      for (TaskAttempt attempt : task.getAttempts().values()) {
        final String attemptIdText=MRApps.toString(attempt.getID());
        final ClientResponse response=webResource.path("ws").path("v1")
            .path("history").path("mapreduce").path("jobs").path(jobIdText)
            .path("tasks").path(taskIdText)
            .path("attempts").path(attemptIdText)
            .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
        final String xml=response.getEntity(String.class);
        final DocumentBuilder builder=
            DocumentBuilderFactory.newInstance().newDocumentBuilder();
        final Document document=
            builder.parse(new InputSource(new StringReader(xml)));
        final NodeList attemptNodes=document.getElementsByTagName("taskAttempt");
        for (int idx=0; idx < attemptNodes.getLength(); idx++) {
          verifyHsTaskAttemptXML(
              (Element)attemptNodes.item(idx),attempt,task.getType());
        }
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks/{taskid}/attempts/{attemptid}/ (trailing slash) must return
 * a single "taskAttempt" object per attempt.
 */
@Test public void testTaskAttemptIdSlash() throws JSONException, Exception {
  final WebResource webResource=resource();
  final Map<JobId,Job> allJobs=appContext.getAllJobs();
  for (Map.Entry<JobId,Job> entry : allJobs.entrySet()) {
    final String jobIdText=MRApps.toString(entry.getKey());
    for (Task task : entry.getValue().getTasks().values()) {
      final String taskIdText=MRApps.toString(task.getID());
      for (TaskAttempt attempt : task.getAttempts().values()) {
        final String attemptIdText=MRApps.toString(attempt.getID());
        final ClientResponse response=webResource.path("ws").path("v1")
            .path("history").path("mapreduce").path("jobs").path(jobIdText)
            .path("tasks").path(taskIdText)
            .path("attempts").path(attemptIdText + "/")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
        final JSONObject body=response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements",1,body.length());
        verifyHsTaskAttempt(body.getJSONObject("taskAttempt"),attempt,task.getType());
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET ws/v1/history/mapreduce/jobs/{jobid}/conf/ (trailing slash) must
 * return a single "conf" object matching the job's configuration.
 */
@Test public void testJobConfSlash() throws JSONException, Exception {
  final WebResource webResource=resource();
  final Map<JobId,Job> allJobs=appContext.getAllJobs();
  for (Map.Entry<JobId,Job> entry : allJobs.entrySet()) {
    final String jobIdText=MRApps.toString(entry.getKey());
    final ClientResponse response=webResource.path("ws").path("v1")
        .path("history").path("mapreduce").path("jobs").path(jobIdText)
        .path("conf/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
    final JSONObject body=response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements",1,body.length());
    verifyHsJobConf(body.getJSONObject("conf"),entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET ws/v1/history/mapreduce/jobs/{jobid}/conf (explicit JSON Accept) must
 * return a single "conf" object matching the job's configuration.
 */
@Test public void testJobConf() throws JSONException, Exception {
  final WebResource webResource=resource();
  final Map<JobId,Job> allJobs=appContext.getAllJobs();
  for (Map.Entry<JobId,Job> entry : allJobs.entrySet()) {
    final String jobIdText=MRApps.toString(entry.getKey());
    final ClientResponse response=webResource.path("ws").path("v1")
        .path("history").path("mapreduce").path("jobs").path(jobIdText)
        .path("conf")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
    final JSONObject body=response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements",1,body.length());
    verifyHsJobConf(body.getJSONObject("conf"),entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET ws/v1/history/mapreduce/jobs/{jobid}/conf with no Accept header must
 * default to JSON and return a single "conf" object.
 */
@Test public void testJobConfDefault() throws JSONException, Exception {
  final WebResource webResource=resource();
  final Map<JobId,Job> allJobs=appContext.getAllJobs();
  for (Map.Entry<JobId,Job> entry : allJobs.entrySet()) {
    final String jobIdText=MRApps.toString(entry.getKey());
    final ClientResponse response=webResource.path("ws").path("v1")
        .path("history").path("mapreduce").path("jobs").path(jobIdText)
        .path("conf").get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
    final JSONObject body=response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements",1,body.length());
    verifyHsJobConf(body.getJSONObject("conf"),entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET ws/v1/history/mapreduce/jobs/{jobid}/conf with an XML Accept header
 * must return XML whose "conf" elements match the job's configuration.
 */
@Test public void testJobConfXML() throws JSONException, Exception {
  final WebResource webResource=resource();
  final Map<JobId,Job> allJobs=appContext.getAllJobs();
  for (Map.Entry<JobId,Job> entry : allJobs.entrySet()) {
    final String jobIdText=MRApps.toString(entry.getKey());
    final ClientResponse response=webResource.path("ws").path("v1")
        .path("history").path("mapreduce").path("jobs").path(jobIdText)
        .path("conf")
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
    final String xml=response.getEntity(String.class);
    final DocumentBuilder builder=
        DocumentBuilderFactory.newInstance().newDocumentBuilder();
    final Document document=
        builder.parse(new InputSource(new StringReader(xml)));
    final NodeList confNodes=document.getElementsByTagName("conf");
    verifyHsJobConfXML(confNodes,entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/jobattempts with no Accept header: JSON must be the
 * default, wrapping a single "jobAttempts" object.
 */
@Test public void testJobAttemptsDefault() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile with the typed loop below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("jobattempts")
        .get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobAttempts");
    verifyHsJobAttempts(info, appContext.getJob(id));
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Counters for a killed job: rebinds the web app against a history context
 * containing a killed job, then checks the counters resource returns only the
 * job id (no counter groups).
 */
@Test public void testJobCountersForKilledJob() throws Exception {
  WebResource r = resource();
  // NOTE(review): 'r' is created before the injector is rebuilt below; this
  // mirrors the original flow -- confirm the Guice container serving 'r'
  // actually picks up the new appContext binding.
  appContext = new MockHistoryContext(0, 1, 1, 1, true);
  injector = Guice.createInjector(new ServletModule() {
    @Override protected void configureServlets() {
      webApp = mock(HsWebApp.class);
      when(webApp.name()).thenReturn("hsmockwebapp");
      bind(JAXBContextResolver.class);
      bind(HsWebServices.class);
      bind(GenericExceptionHandler.class);
      bind(WebApp.class).toInstance(webApp);
      bind(AppContext.class).toInstance(appContext);
      bind(HistoryContext.class).toInstance(appContext);
      bind(Configuration.class).toInstance(conf);
      serve("/*").with(GuiceContainer.class);
    }
  });
  // Raw Map would not compile with the typed loop below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("counters/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobCounters");
    WebServicesTestUtils.checkStringMatch("id", MRApps.toString(id),
        info.getString("id"));
    // A killed job publishes no counter groups, so only "id" is present.
    assertEquals("Job shouldn't contain any counters", 1, info.length());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/jobattempts with Accept: application/json: a single
 * "jobAttempts" object is returned and verified per job.
 */
@Test public void testJobAttempts() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile with the typed loop below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("jobattempts")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobAttempts");
    verifyHsJobAttempts(info, appContext.getJob(id));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/counters/ with no Accept header: JSON must be the default;
 * the "jobCounters" object is verified against the job's real counters.
 */
@Test public void testJobCountersDefault() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile with the typed loop below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("counters/")
        .get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobCounters");
    verifyHsJobCounters(info, appContext.getJob(id));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/ (trailing slash variant): the job resource must resolve
 * identically to the slash-less form and return a single "job" object.
 */
@Test public void testJobIdSlash() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile with the typed loop below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId + "/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("job");
    VerifyJobsUtils.verifyHsJob(info, appContext.getJob(id));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/jobattempts/ (trailing slash variant): must behave like
 * the slash-less form and return a single "jobAttempts" object.
 */
@Test public void testJobAttemptsSlash() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile with the typed loop below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("jobattempts/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobAttempts");
    verifyHsJobAttempts(info, appContext.getJob(id));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid} with Accept: application/json: the response contains a
 * single "job" object matching the stored job.
 */
@Test public void testJobId() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile with the typed loop below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId)
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("job");
    VerifyJobsUtils.verifyHsJob(info, appContext.getJob(id));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid} with Accept: application/xml: parses the XML payload and
 * verifies the "job" elements against the application context.
 */
@Test public void testJobIdXML() throws Exception {
  WebResource r = resource();
  // Raw Map would not compile with the typed loop below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId)
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String xml = response.getEntity(String.class);
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom = db.parse(is);
    NodeList job = dom.getElementsByTagName("job");
    verifyHsJobXML(job, appContext);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/counters with Accept: application/json: a single
 * "jobCounters" object is returned and verified per job.
 */
@Test public void testJobCounters() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile with the typed loop below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("counters")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobCounters");
    verifyHsJobCounters(info, appContext.getJob(id));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/counters/ (trailing slash variant): must behave like the
 * slash-less counters resource.
 */
@Test public void testJobCountersSlash() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile with the typed loop below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("counters/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobCounters");
    verifyHsJobCounters(info, appContext.getJob(id));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/jobattempts with Accept: application/xml: asserts exactly
 * one "jobAttempts" wrapper element and verifies each "jobAttempt" entry.
 */
@Test public void testJobAttemptsXML() throws Exception {
  WebResource r = resource();
  // Raw Map would not compile with the typed loop below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("jobattempts")
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String xml = response.getEntity(String.class);
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom = db.parse(is);
    NodeList attempts = dom.getElementsByTagName("jobAttempts");
    assertEquals("incorrect number of elements", 1, attempts.getLength());
    NodeList info = dom.getElementsByTagName("jobAttempt");
    verifyHsJobAttemptsXML(info, appContext.getJob(id));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs with Accept: application/xml: expects exactly one "jobs" wrapper
 * holding exactly one "job" element, then verifies the partial job view.
 */
@Test public void testJobsXML() throws Exception {
  WebResource resource = resource();
  ClientResponse response = resource.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
  String body = response.getEntity(String.class);
  // Parse the XML body into a DOM for element-count assertions.
  DocumentBuilder parser =
      DocumentBuilderFactory.newInstance().newDocumentBuilder();
  InputSource source = new InputSource();
  source.setCharacterStream(new StringReader(body));
  Document document = parser.parse(source);
  NodeList jobsNodes = document.getElementsByTagName("jobs");
  assertEquals("incorrect number of elements", 1, jobsNodes.getLength());
  NodeList jobNodes = document.getElementsByTagName("job");
  assertEquals("incorrect number of elements", 1, jobNodes.getLength());
  verifyHsJobPartialXML(jobNodes, appContext);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid} with no Accept header: JSON must be the default; the
 * single "job" object is verified against the stored job.
 */
@Test public void testJobIdDefault() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile with the typed loop below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId)
        .get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("job");
    VerifyJobsUtils.verifyHsJob(info, appContext.getJob(id));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/counters with Accept: application/xml: parses the payload
 * and verifies the "jobCounters" elements per job.
 */
@Test public void testJobCountersXML() throws Exception {
  WebResource r = resource();
  // Raw Map would not compile with the typed loop below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("counters")
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String xml = response.getEntity(String.class);
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom = db.parse(is);
    NodeList info = dom.getElementsByTagName("jobCounters");
    verifyHsJobCountersXML(info, appContext.getJob(id));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Query jobs with finishedTimeBegin = now: all three mock history jobs
 * finished before "now", so all of them must be returned.
 */
@Test public void testJobsQueryFinishTimeBegin() throws JSONException, Exception {
  WebResource r = resource();
  // Primitive long: boxing to Long was unnecessary here.
  long now = System.currentTimeMillis();
  ClientResponse response = r.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("finishedTimeBegin", String.valueOf(now))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject jobs = json.getJSONObject("jobs");
  JSONArray arr = jobs.getJSONArray("job");
  // NOTE(review): all mock jobs report finish times <= now, hence 3 --
  // confirm against the MockHistoryContext fixture.
  assertEquals("incorrect number of elements", 3, arr.length());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Query jobs with a [finishedTimeBegin, finishedTimeEnd] window ending just
 * below the latest finish time: exactly size-1 jobs must be returned.
 */
@Test public void testJobsQueryFinishTimeBeginEnd() throws JSONException, Exception {
  WebResource r = resource();
  // Parameterized types: the raw Map/ArrayList would not compile (the loop
  // needs Job values; get(size - 2) must yield a long).
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  int size = jobsMap.size();
  ArrayList<Long> finishTime = new ArrayList<Long>(size);
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    finishTime.add(entry.getValue().getReport().getFinishTime());
  }
  Collections.sort(finishTime);
  assertTrue("Error we must have atleast 3 jobs", size >= 3);
  // Second-largest finish time: excludes exactly the last-finishing job.
  long midFinishTime = finishTime.get(size - 2);
  ClientResponse response = r.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("finishedTimeBegin", String.valueOf(40000))
      .queryParam("finishedTimeEnd", String.valueOf(midFinishTime))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject jobs = json.getJSONObject("jobs");
  JSONArray arr = jobs.getJSONArray("job");
  assertEquals("incorrect number of elements", size - 1, arr.length());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Query jobs with startedTimeEnd = now: every mock job started before "now",
 * so all three must be returned.
 */
@Test public void testJobsQueryStartTimeEnd() throws JSONException, Exception {
  WebResource r = resource();
  // Primitive long: boxing to Long was unnecessary here.
  long now = System.currentTimeMillis();
  ClientResponse response = r.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("startedTimeEnd", String.valueOf(now))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject jobs = json.getJSONObject("jobs");
  JSONArray arr = jobs.getJSONArray("job");
  assertEquals("incorrect number of elements", 3, arr.length());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Query jobs with a [startedTimeBegin, startedTimeEnd] window ending just
 * below the latest start time: exactly size-1 jobs must be returned.
 */
@Test public void testJobsQueryStartTimeBeginEnd() throws JSONException, Exception {
  WebResource r = resource();
  // Parameterized types: the raw Map/ArrayList would not compile (the loop
  // needs Job values; get(size - 2) must yield a long).
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  int size = jobsMap.size();
  ArrayList<Long> startTime = new ArrayList<Long>(size);
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    startTime.add(entry.getValue().getReport().getStartTime());
  }
  Collections.sort(startTime);
  assertTrue("Error we must have atleast 3 jobs", size >= 3);
  // Second-largest start time: excludes exactly the last-starting job.
  long midStartTime = startTime.get(size - 2);
  ClientResponse response = r.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("startedTimeBegin", String.valueOf(40000))
      .queryParam("startedTimeEnd", String.valueOf(midStartTime))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject jobs = json.getJSONObject("jobs");
  JSONArray arr = jobs.getJSONArray("job");
  assertEquals("incorrect number of elements", size - 1, arr.length());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Invalid query where finishedTimeEnd precedes finishedTimeBegin: the service
 * must reject it with 400 and a structured RemoteException payload.
 */
@Test public void testJobsQueryFinishTimeBeginEndInvalid() throws JSONException, Exception {
  WebResource r = resource();
  // Primitive long: boxing to Long was unnecessary here.
  long now = System.currentTimeMillis();
  ClientResponse response = r.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("finishedTimeBegin", String.valueOf(now))
      .queryParam("finishedTimeEnd", String.valueOf(40000))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject msg = response.getEntity(JSONObject.class);
  JSONObject exception = msg.getJSONObject("RemoteException");
  assertEquals("incorrect number of elements", 3, exception.length());
  String message = exception.getString("message");
  String type = exception.getString("exception");
  String classname = exception.getString("javaClassName");
  // The message text is produced by the server; it must match verbatim.
  WebServicesTestUtils.checkStringMatch("exception message",
      "java.lang.Exception: finishedTimeEnd must be greater than finishedTimeBegin",
      message);
  WebServicesTestUtils.checkStringMatch("exception type",
      "BadRequestException", type);
  WebServicesTestUtils.checkStringMatch("exception classname",
      "org.apache.hadoop.yarn.webapp.BadRequestException", classname);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Query jobs by state: picks the state of the first stored job, queries for
 * it, and verifies exactly that job comes back (partial view).
 */
@Test public void testJobsQueryState() throws JSONException, Exception {
  WebResource r = resource();
  // Parameterized types: raw Map.Entry would not compile (getValue() must
  // be a Job to call getID()/getState()).
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  String queryState = "BOGUS";  // overwritten below if any job exists
  JobId jid = null;
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    jid = entry.getValue().getID();
    queryState = entry.getValue().getState().toString();
    break;  // only the first job's state is queried
  }
  ClientResponse response = r.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs").queryParam("state", queryState)
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject jobs = json.getJSONObject("jobs");
  JSONArray arr = jobs.getJSONArray("job");
  // NOTE(review): assumes no two mock jobs share a state -- confirm
  // against the MockHistoryContext fixture.
  assertEquals("incorrect number of elements", 1, arr.length());
  JSONObject info = arr.getJSONObject(0);
  Job job = appContext.getPartialJob(jid);
  VerifyJobsUtils.verifyHsJobPartial(info, job);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Query jobs with startedTimeBegin = now: no mock job started after "now",
 * so the "jobs" element must be JSON null.
 */
@Test public void testJobsQueryStartTimeBegin() throws JSONException, Exception {
  WebResource r = resource();
  // Primitive long: boxing to Long was unnecessary here.
  long now = System.currentTimeMillis();
  ClientResponse response = r.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("startedTimeBegin", String.valueOf(now))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Query jobs by a state no stored job is in: computes an unused JobState by
 * removing every in-use state, then expects a JSON-null "jobs" element.
 */
@Test public void testJobsQueryStateNone() throws JSONException, Exception {
  WebResource r = resource();
  // Parameterized list: the raw ArrayList would not compile (get(0) must
  // yield a JobState). Renamed from UPPER_SNAKE: it is a local, not a const.
  ArrayList<JobState> jobStates =
      new ArrayList<JobState>(Arrays.asList(JobState.values()));
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    jobStates.remove(entry.getValue().getState());
  }
  assertTrue("No unused job states", jobStates.size() > 0);
  JobState notInUse = jobStates.get(0);
  ClientResponse response = r.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("state", notInUse.toString())
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Query jobs with finishedTimeEnd = now: the mock fixture yields no jobs in
 * this window, so "jobs" must be JSON null.
 */
@Test public void testJobsQueryFinishTimeEnd() throws JSONException, Exception {
  WebResource r = resource();
  // Primitive long: boxing to Long was unnecessary here.
  long now = System.currentTimeMillis();
  ClientResponse response = r.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("finishedTimeEnd", String.valueOf(now))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  // NOTE(review): expects null even though finishedTimeEnd=now would seem
  // to include past jobs -- confirm the service's filtering semantics.
  assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Invalid query where startedTimeEnd precedes startedTimeBegin: the service
 * must reject it with 400 and a structured RemoteException payload.
 */
@Test public void testJobsQueryStartTimeBeginEndInvalid() throws JSONException, Exception {
  WebResource r = resource();
  // Primitive long: boxing to Long was unnecessary here.
  long now = System.currentTimeMillis();
  ClientResponse response = r.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("startedTimeBegin", String.valueOf(now))
      .queryParam("startedTimeEnd", String.valueOf(40000))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject msg = response.getEntity(JSONObject.class);
  JSONObject exception = msg.getJSONObject("RemoteException");
  assertEquals("incorrect number of elements", 3, exception.length());
  String message = exception.getString("message");
  String type = exception.getString("exception");
  String classname = exception.getString("javaClassName");
  // Server-produced text, including its "startTimeBegin" spelling, must
  // match verbatim -- do not "fix" the expected string here.
  WebServicesTestUtils.checkStringMatch("exception message",
      "java.lang.Exception: startedTimeEnd must be greater than startTimeBegin",
      message);
  WebServicesTestUtils.checkStringMatch("exception type",
      "BadRequestException", type);
  WebServicesTestUtils.checkStringMatch("exception classname",
      "org.apache.hadoop.yarn.webapp.BadRequestException", classname);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks/{taskid}/counters with Accept: application/json: verifies
 * the "jobTaskCounters" object for every task of every job.
 */
@Test public void testTaskIdCounters() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile: the loops need JobId keys and Job values.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("history")
          .path("mapreduce").path("jobs").path(jobId).path("tasks")
          .path(tid).path("counters")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject info = json.getJSONObject("jobTaskCounters");
      verifyHsJobTaskCounters(info, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks/{taskid}/counters/ (trailing slash variant): must behave
 * like the slash-less task counters resource.
 */
@Test public void testTaskIdCountersSlash() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile: the loops need JobId keys and Job values.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("history")
          .path("mapreduce").path("jobs").path(jobId).path("tasks")
          .path(tid).path("counters/")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject info = json.getJSONObject("jobTaskCounters");
      verifyHsJobTaskCounters(info, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks?type=r: filters to reduce tasks only; each mock job has
 * exactly one reduce task.
 */
@Test public void testTasksQueryReduce() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile: the loop needs JobId keys and Job values.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String type = "r";  // "r" selects reduce tasks
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("tasks")
        .queryParam("type", type)
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject tasks = json.getJSONObject("tasks");
    JSONArray arr = tasks.getJSONArray("task");
    assertEquals("incorrect number of elements", 1, arr.length());
    verifyHsTask(arr, jobsMap.get(id), type);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks/{taskid}/counters with Accept: application/xml: parses the
 * payload and verifies "jobTaskCounters" for every task of every job.
 */
@Test public void testJobTaskCountersXML() throws Exception {
  WebResource r = resource();
  // Raw Map would not compile: the loops need JobId keys and Job values.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("history")
          .path("mapreduce").path("jobs").path(jobId).path("tasks")
          .path(tid).path("counters")
          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
      String xml = response.getEntity(String.class);
      DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
      DocumentBuilder db = dbf.newDocumentBuilder();
      InputSource is = new InputSource();
      is.setCharacterStream(new StringReader(xml));
      Document dom = db.parse(is);
      NodeList info = dom.getElementsByTagName("jobTaskCounters");
      verifyHsTaskCountersXML(info, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks/{taskid}/counters with no Accept header: JSON must be the
 * default, and the counters are verified per task.
 */
@Test public void testTaskIdCountersDefault() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile: the loops need JobId keys and Job values.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("history")
          .path("mapreduce").path("jobs").path(jobId).path("tasks")
          .path(tid).path("counters").get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject info = json.getJSONObject("jobTaskCounters");
      verifyHsJobTaskCounters(info, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks with Accept: application/xml: asserts exactly one "tasks"
 * wrapper element and verifies each "task" entry per job.
 */
@Test public void testTasksXML() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile: the loop needs JobId keys and Job values.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("tasks")
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String xml = response.getEntity(String.class);
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom = db.parse(is);
    NodeList tasks = dom.getElementsByTagName("tasks");
    assertEquals("incorrect number of elements", 1, tasks.getLength());
    NodeList task = dom.getElementsByTagName("task");
    verifyHsTaskXML(task, jobsMap.get(id));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks/ (trailing slash variant): must behave like the slash-less
 * tasks resource; each mock job has exactly two tasks (one map, one reduce).
 */
@Test public void testTasksSlash() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile: the loop needs JobId keys and Job values.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("tasks/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject tasks = json.getJSONObject("tasks");
    JSONArray arr = tasks.getJSONArray("task");
    assertEquals("incorrect number of elements", 2, arr.length());
    verifyHsTask(arr, jobsMap.get(id), null);  // null type = all tasks
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks/{taskid} with Accept: application/json: the single "task"
 * object is verified against each stored task.
 */
@Test public void testTaskId() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile: the loops need JobId keys and Job values.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("history")
          .path("mapreduce").path("jobs").path(jobId).path("tasks")
          .path(tid)
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject info = json.getJSONObject("task");
      verifyHsSingleTask(info, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks/{taskid} with no Accept header: JSON must be the default,
 * and the single "task" object is verified per task.
 */
@Test public void testTaskIdDefault() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile: the loops need JobId keys and Job values.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("history")
          .path("mapreduce").path("jobs").path(jobId).path("tasks")
          .path(tid).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject info = json.getJSONObject("task");
      verifyHsSingleTask(info, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks with no Accept header: JSON must be the default; each mock
 * job has exactly two tasks (one map, one reduce).
 */
@Test public void testTasksDefault() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile: the loop needs JobId keys and Job values.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("tasks")
        .get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject tasks = json.getJSONObject("tasks");
    JSONArray arr = tasks.getJSONArray("task");
    assertEquals("incorrect number of elements", 2, arr.length());
    verifyHsTask(arr, jobsMap.get(id), null);  // null type = all tasks
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks with Accept: application/json: the "tasks" wrapper holds a
 * two-element "task" array (one map, one reduce) per mock job.
 */
@Test public void testTasks() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile: the loop needs JobId keys and Job values.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("tasks")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject tasks = json.getJSONObject("tasks");
    JSONArray arr = tasks.getJSONArray("task");
    assertEquals("incorrect number of elements", 2, arr.length());
    verifyHsTask(arr, jobsMap.get(id), null);  // null type = all tasks
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks/{taskid}/ (trailing slash variant): must resolve identically
 * to the slash-less form and return a single "task" object.
 */
@Test public void testTaskIdSlash() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile: the loops need JobId keys and Job values.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("history")
          .path("mapreduce").path("jobs").path(jobId).path("tasks")
          .path(tid + "/")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject info = json.getJSONObject("task");
      verifyHsSingleTask(info, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks?type=m: filters to map tasks only; each mock job has exactly
 * one map task.
 */
@Test public void testTasksQueryMap() throws JSONException, Exception {
  WebResource r = resource();
  // Raw Map would not compile: the loop needs JobId keys and Job values.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String type = "m";  // "m" selects map tasks
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("tasks")
        .queryParam("type", type)
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject tasks = json.getJSONObject("tasks");
    JSONArray arr = tasks.getJSONArray("task");
    assertEquals("incorrect number of elements", 1, arr.length());
    verifyHsTask(arr, jobsMap.get(id), type);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * History server REST: fetching a single task with Accept: application/xml.
 * For every job/task known to the mock app context, GETs
 * ws/v1/history/mapreduce/jobs/{jobid}/tasks/{tid}, parses the XML payload
 * with the JDK DOM parser, and verifies each "task" element against the
 * in-memory Task model.
 */
@Test public void testTaskIdXML() throws JSONException, Exception {
WebResource r=resource();
Map jobsMap=appContext.getAllJobs();
for ( JobId id : jobsMap.keySet()) {
String jobId=MRApps.toString(id);
for ( Task task : jobsMap.get(id).getTasks().values()) {
String tid=MRApps.toString(task.getID());
ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
String xml=response.getEntity(String.class);
// Parse the raw XML response body into a DOM document.
DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance();
DocumentBuilder db=dbf.newDocumentBuilder();
InputSource is=new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom=db.parse(is);
NodeList nodes=dom.getElementsByTagName("task");
// NOTE(review): if no <task> element is present this loop body never runs
// and the test silently passes — consider asserting nodes.getLength() > 0.
for (int i=0; i < nodes.getLength(); i++) {
Element element=(Element)nodes.item(i);
verifyHsSingleTaskXML(element,task);
}
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Loads a completed sleep job from checked-in history/conf fixture files and
 * checks that the average merge time surfaced through the JobInfo bean is
 * exactly 50 ms.
 */
@Test(timeout=10000) public void testAverageMergeTime() throws IOException {
  final String historyFileName =
      "job_1329348432655_0001-1329348443227-user-Sleep+job-1329348468601-10-1-SUCCEEDED-default.jhist";
  final String confFileName = "job_1329348432655_0001_conf.xml";
  final Configuration conf = new Configuration();
  final JobACLsManager jobAclsMgr = new JobACLsManager(conf);
  // Resolve both fixture files from the test classpath.
  final Path fullHistoryPath = new Path(TestJobHistoryEntities.class
      .getClassLoader().getResource(historyFileName).getFile());
  final Path fullConfPath = new Path(TestJobHistoryEntities.class
      .getClassLoader().getResource(confFileName).getFile());
  final HistoryFileInfo info = mock(HistoryFileInfo.class);
  when(info.getConfFile()).thenReturn(fullConfPath);
  final JobId jobId = MRBuilderUtils.newJobId(1329348432655L, 1, 1);
  final CompletedJob completedJob =
      new CompletedJob(conf, jobId, fullHistoryPath, true, "user", info, jobAclsMgr);
  final JobInfo jobInfo = new JobInfo(completedJob);
  Assert.assertEquals(50L, jobInfo.getAvgMergeTime().longValue());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Backwards compatibility: a history file name in the old
 * OLD_JOB_HISTORY_FILE_FORMATTER layout (which predates the queue-name
 * field) must still parse, with every legacy field recovered and the absent
 * queue name decoded as null.
 */
@Test public void testJobHistoryFileNameBackwardsCompatible() throws IOException {
JobID oldJobId=JobID.forName(JOB_ID);
JobId jobId=TypeConverter.toYarn(oldJobId);
// Parse the string constants once so assertions compare typed values.
long submitTime=Long.parseLong(SUBMIT_TIME);
long finishTime=Long.parseLong(FINISH_TIME);
int numMaps=Integer.parseInt(NUM_MAPS);
int numReduces=Integer.parseInt(NUM_REDUCES);
// Build a file name in the old (queue-less) format and decode it.
String jobHistoryFile=String.format(OLD_JOB_HISTORY_FILE_FORMATTER,JOB_ID,SUBMIT_TIME,USER_NAME,JOB_NAME,FINISH_TIME,NUM_MAPS,NUM_REDUCES,JOB_STATUS);
JobIndexInfo info=FileNameIndexUtils.getIndexInfo(jobHistoryFile);
Assert.assertEquals("Job id incorrect after decoding old history file",jobId,info.getJobId());
Assert.assertEquals("Submit time incorrect after decoding old history file",submitTime,info.getSubmitTime());
Assert.assertEquals("User incorrect after decoding old history file",USER_NAME,info.getUser());
Assert.assertEquals("Job name incorrect after decoding old history file",JOB_NAME,info.getJobName());
Assert.assertEquals("Finish time incorrect after decoding old history file",finishTime,info.getFinishTime());
Assert.assertEquals("Num maps incorrect after decoding old history file",numMaps,info.getNumMaps());
Assert.assertEquals("Num reduces incorrect after decoding old history file",numReduces,info.getNumReduces());
Assert.assertEquals("Job status incorrect after decoding old history file",JOB_STATUS,info.getJobStatus());
// The old format has no queue field, so the decoded queue must be null.
Assert.assertNull("Queue name incorrect after decoding old history file",info.getQueueName());
}
APIUtilityVerifier EqualityVerifier
/**
 * A history file name whose user field contains the percent-escaped
 * delimiter must decode back to the original (unescaped) user name.
 */
@Test public void testUserNamePercentDecoding() throws IOException {
  final String fileName = String.format(JOB_HISTORY_FILE_FORMATTER, JOB_ID,
      SUBMIT_TIME, USER_NAME_WITH_DELIMITER_ESCAPE, JOB_NAME, FINISH_TIME,
      NUM_MAPS, NUM_REDUCES, JOB_STATUS, QUEUE_NAME, JOB_START_TIME);
  final JobIndexInfo decoded = FileNameIndexUtils.getIndexInfo(fileName);
  Assert.assertEquals("User name doesn't match",
      USER_NAME_WITH_DELIMITER, decoded.getUser());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Round trip: a JobIndexInfo encoded into a done-file name by
 * getDoneFileName and decoded back by getIndexInfo must preserve every
 * field (job id, times, user, job name, task counts, status, queue).
 */
@Test public void testEncodingDecodingEquivalence() throws IOException {
JobIndexInfo info=new JobIndexInfo();
JobID oldJobId=JobID.forName(JOB_ID);
JobId jobId=TypeConverter.toYarn(oldJobId);
// Populate every field from the shared test constants.
info.setJobId(jobId);
info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
info.setUser(USER_NAME);
info.setJobName(JOB_NAME);
info.setFinishTime(Long.parseLong(FINISH_TIME));
info.setNumMaps(Integer.parseInt(NUM_MAPS));
info.setNumReduces(Integer.parseInt(NUM_REDUCES));
info.setJobStatus(JOB_STATUS);
info.setQueueName(QUEUE_NAME);
info.setJobStartTime(Long.parseLong(JOB_START_TIME));
// Encode to a file name, then decode and compare field by field.
String jobHistoryFile=FileNameIndexUtils.getDoneFileName(info);
JobIndexInfo parsedInfo=FileNameIndexUtils.getIndexInfo(jobHistoryFile);
Assert.assertEquals("Job id different after encoding and decoding",info.getJobId(),parsedInfo.getJobId());
Assert.assertEquals("Submit time different after encoding and decoding",info.getSubmitTime(),parsedInfo.getSubmitTime());
Assert.assertEquals("User different after encoding and decoding",info.getUser(),parsedInfo.getUser());
Assert.assertEquals("Job name different after encoding and decoding",info.getJobName(),parsedInfo.getJobName());
Assert.assertEquals("Finish time different after encoding and decoding",info.getFinishTime(),parsedInfo.getFinishTime());
Assert.assertEquals("Num maps different after encoding and decoding",info.getNumMaps(),parsedInfo.getNumMaps());
Assert.assertEquals("Num reduces different after encoding and decoding",info.getNumReduces(),parsedInfo.getNumReduces());
Assert.assertEquals("Job status different after encoding and decoding",info.getJobStatus(),parsedInfo.getJobStatus());
Assert.assertEquals("Queue name different after encoding and decoding",info.getQueueName(),parsedInfo.getQueueName());
Assert.assertEquals("Job start time different after encoding and decoding",info.getJobStartTime(),parsedInfo.getJobStartTime());
}
APIUtilityVerifier EqualityVerifier
/**
 * A history file name whose job-name field contains the percent-escaped
 * delimiter must decode back to the original (unescaped) job name.
 */
@Test public void testJobNamePercentDecoding() throws IOException {
  final String fileName = String.format(JOB_HISTORY_FILE_FORMATTER, JOB_ID,
      SUBMIT_TIME, USER_NAME, JOB_NAME_WITH_DELIMITER_ESCAPE, FINISH_TIME,
      NUM_MAPS, NUM_REDUCES, JOB_STATUS, QUEUE_NAME, JOB_START_TIME);
  final JobIndexInfo decoded = FileNameIndexUtils.getIndexInfo(fileName);
  Assert.assertEquals("Job name doesn't match",
      JOB_NAME_WITH_DELIMITER, decoded.getJobName());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Encoding: when the user name contains the history-file delimiter, the
 * generated done-file name must carry the percent-escaped form of it.
 */
@Test public void testUserNamePercentEncoding() throws IOException {
JobIndexInfo info=new JobIndexInfo();
JobID oldJobId=JobID.forName(JOB_ID);
JobId jobId=TypeConverter.toYarn(oldJobId);
info.setJobId(jobId);
info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
// The field under test: a user name containing the reserved delimiter.
info.setUser(USER_NAME_WITH_DELIMITER);
info.setJobName(JOB_NAME);
info.setFinishTime(Long.parseLong(FINISH_TIME));
info.setNumMaps(Integer.parseInt(NUM_MAPS));
info.setNumReduces(Integer.parseInt(NUM_REDUCES));
info.setJobStatus(JOB_STATUS);
info.setQueueName(QUEUE_NAME);
info.setJobStartTime(Long.parseLong(JOB_START_TIME));
String jobHistoryFile=FileNameIndexUtils.getDoneFileName(info);
Assert.assertTrue("User name not encoded correctly into job history file",jobHistoryFile.contains(USER_NAME_WITH_DELIMITER_ESCAPE));
}
APIUtilityVerifier EqualityVerifier
/**
 * Backwards compatibility: a history file name in the pre-start-time format
 * (no job start time field) must still parse, with the missing start time
 * defaulting to the submit time.
 */
@Test public void testJobStartTimeBackwardsCompatible() throws IOException {
  final String oldStyleName = String.format(OLD_FORMAT_BEFORE_ADD_START_TIME,
      JOB_ID, SUBMIT_TIME, USER_NAME, JOB_NAME_WITH_DELIMITER_ESCAPE,
      FINISH_TIME, NUM_MAPS, NUM_REDUCES, JOB_STATUS, QUEUE_NAME);
  final JobIndexInfo decoded = FileNameIndexUtils.getIndexInfo(oldStyleName);
  Assert.assertEquals(decoded.getJobStartTime(), decoded.getSubmitTime());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Encoding: when the queue name contains the history-file delimiter, the
 * generated done-file name must carry the percent-escaped form of it.
 */
@Test public void testQueueNamePercentEncoding() throws IOException {
JobIndexInfo info=new JobIndexInfo();
JobID oldJobId=JobID.forName(JOB_ID);
JobId jobId=TypeConverter.toYarn(oldJobId);
info.setJobId(jobId);
info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
info.setUser(USER_NAME);
info.setJobName(JOB_NAME);
info.setFinishTime(Long.parseLong(FINISH_TIME));
info.setNumMaps(Integer.parseInt(NUM_MAPS));
info.setNumReduces(Integer.parseInt(NUM_REDUCES));
info.setJobStatus(JOB_STATUS);
// The field under test: a queue name containing the reserved delimiter.
info.setQueueName(QUEUE_NAME_WITH_DELIMITER);
info.setJobStartTime(Long.parseLong(JOB_START_TIME));
String jobHistoryFile=FileNameIndexUtils.getDoneFileName(info);
Assert.assertTrue("Queue name not encoded correctly into job history file",jobHistoryFile.contains(QUEUE_NAME_WITH_DELIMITER_ESCAPE));
}
APIUtilityVerifier EqualityVerifier
/**
 * A history file name whose queue field contains the percent-escaped
 * delimiter must decode back to the original (unescaped) queue name.
 */
@Test public void testQueueNamePercentDecoding() throws IOException {
  final String fileName = String.format(JOB_HISTORY_FILE_FORMATTER, JOB_ID,
      SUBMIT_TIME, USER_NAME, JOB_NAME, FINISH_TIME, NUM_MAPS, NUM_REDUCES,
      JOB_STATUS, QUEUE_NAME_WITH_DELIMITER_ESCAPE, JOB_START_TIME);
  final JobIndexInfo decoded = FileNameIndexUtils.getIndexInfo(fileName);
  Assert.assertEquals("Queue name doesn't match",
      QUEUE_NAME_WITH_DELIMITER, decoded.getQueueName());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Encoding: when the job name contains the history-file delimiter, the
 * generated done-file name must carry the percent-escaped form of it.
 */
@Test public void testJobNamePercentEncoding() throws IOException {
JobIndexInfo info=new JobIndexInfo();
JobID oldJobId=JobID.forName(JOB_ID);
JobId jobId=TypeConverter.toYarn(oldJobId);
info.setJobId(jobId);
info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
info.setUser(USER_NAME);
// The field under test: a job name containing the reserved delimiter.
info.setJobName(JOB_NAME_WITH_DELIMITER);
info.setFinishTime(Long.parseLong(FINISH_TIME));
info.setNumMaps(Integer.parseInt(NUM_MAPS));
info.setNumReduces(Integer.parseInt(NUM_REDUCES));
info.setJobStatus(JOB_STATUS);
info.setQueueName(QUEUE_NAME);
info.setJobStartTime(Long.parseLong(JOB_START_TIME));
String jobHistoryFile=FileNameIndexUtils.getDoneFileName(info);
Assert.assertTrue("Job name not encoded correctly into job history file",jobHistoryFile.contains(JOB_NAME_WITH_DELIMITER_ESCAPE));
}
APIUtilityVerifier EqualityVerifier
/**
 * getHistoryDirsForCleaning must return exactly the 14 day-directories
 * whose year/month/day is at or before the cutoff day (2013-07-21), in
 * chronological order after sorting, and must skip directories whose date
 * components are not numeric.
 */
@Test @SuppressWarnings("unchecked") public void testGetHistoryDirsForCleaning() throws IOException {
Path pRoot=new Path(TEST_DIR,"org.apache.hadoop.mapreduce.v2.jobhistory." + "TestJobHistoryUtils.testGetHistoryDirsForCleaning");
FileContext fc=FileContext.getFileContext();
Calendar cCal=Calendar.getInstance();
int year=2013;
int month=7;
int day=21;
// Cutoff is 01:00 on 2013-07-21 (Calendar months are 0-based, hence month-1).
cCal.set(year,month - 1,day,1,0);
long cutoff=cCal.getTimeInMillis();
clearDir(fc,pRoot);
// Day directories around the cutoff: same/next/previous day for each of the
// current, next and previous month and year.
Path pId00=createPath(fc,pRoot,year,month,day,"000000");
Path pId01=createPath(fc,pRoot,year,month,day + 1,"000001");
Path pId02=createPath(fc,pRoot,year,month,day - 1,"000002");
Path pId03=createPath(fc,pRoot,year,month + 1,day,"000003");
Path pId04=createPath(fc,pRoot,year,month + 1,day + 1,"000004");
Path pId05=createPath(fc,pRoot,year,month + 1,day - 1,"000005");
Path pId06=createPath(fc,pRoot,year,month - 1,day,"000006");
Path pId07=createPath(fc,pRoot,year,month - 1,day + 1,"000007");
Path pId08=createPath(fc,pRoot,year,month - 1,day - 1,"000008");
Path pId09=createPath(fc,pRoot,year + 1,month,day,"000009");
Path pId10=createPath(fc,pRoot,year + 1,month,day + 1,"000010");
Path pId11=createPath(fc,pRoot,year + 1,month,day - 1,"000011");
Path pId12=createPath(fc,pRoot,year + 1,month + 1,day,"000012");
Path pId13=createPath(fc,pRoot,year + 1,month + 1,day + 1,"000013");
Path pId14=createPath(fc,pRoot,year + 1,month + 1,day - 1,"000014");
Path pId15=createPath(fc,pRoot,year + 1,month - 1,day,"000015");
Path pId16=createPath(fc,pRoot,year + 1,month - 1,day + 1,"000016");
Path pId17=createPath(fc,pRoot,year + 1,month - 1,day - 1,"000017");
Path pId18=createPath(fc,pRoot,year - 1,month,day,"000018");
Path pId19=createPath(fc,pRoot,year - 1,month,day + 1,"000019");
Path pId20=createPath(fc,pRoot,year - 1,month,day - 1,"000020");
Path pId21=createPath(fc,pRoot,year - 1,month + 1,day,"000021");
Path pId22=createPath(fc,pRoot,year - 1,month + 1,day + 1,"000022");
Path pId23=createPath(fc,pRoot,year - 1,month + 1,day - 1,"000023");
Path pId24=createPath(fc,pRoot,year - 1,month - 1,day,"000024");
Path pId25=createPath(fc,pRoot,year - 1,month - 1,day + 1,"000025");
Path pId26=createPath(fc,pRoot,year - 1,month - 1,day - 1,"000026");
// Directories with non-numeric date components must be ignored entirely.
Path pId27=createPath(fc,pRoot,"foo","" + month,"" + day,"000027");
Path pId28=createPath(fc,pRoot,"" + year,"foo","" + day,"000028");
Path pId29=createPath(fc,pRoot,"" + year,"" + month,"foo","000029");
List dirs=JobHistoryUtils.getHistoryDirsForCleaning(fc,pRoot,cutoff);
Collections.sort(dirs);
// Exactly the 14 directories at or before the cutoff day, oldest first.
Assert.assertEquals(14,dirs.size());
Assert.assertEquals(pId26.toUri().getPath(),dirs.get(0).getPath().toUri().getPath());
Assert.assertEquals(pId24.toUri().getPath(),dirs.get(1).getPath().toUri().getPath());
Assert.assertEquals(pId25.toUri().getPath(),dirs.get(2).getPath().toUri().getPath());
Assert.assertEquals(pId20.toUri().getPath(),dirs.get(3).getPath().toUri().getPath());
Assert.assertEquals(pId18.toUri().getPath(),dirs.get(4).getPath().toUri().getPath());
Assert.assertEquals(pId19.toUri().getPath(),dirs.get(5).getPath().toUri().getPath());
Assert.assertEquals(pId23.toUri().getPath(),dirs.get(6).getPath().toUri().getPath());
Assert.assertEquals(pId21.toUri().getPath(),dirs.get(7).getPath().toUri().getPath());
Assert.assertEquals(pId22.toUri().getPath(),dirs.get(8).getPath().toUri().getPath());
Assert.assertEquals(pId08.toUri().getPath(),dirs.get(9).getPath().toUri().getPath());
Assert.assertEquals(pId06.toUri().getPath(),dirs.get(10).getPath().toUri().getPath());
Assert.assertEquals(pId07.toUri().getPath(),dirs.get(11).getPath().toUri().getPath());
Assert.assertEquals(pId02.toUri().getPath(),dirs.get(12).getPath().toUri().getPath());
Assert.assertEquals(pId00.toUri().getPath(),dirs.get(13).getPath().toUri().getPath());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * MRApps.getJobFile with an explicit user must yield the job.xml path under
 * that user's .staging directory below the configured staging root.
 */
@Test(timeout=120000) public void testGetJobFileWithUser(){
  final Configuration conf = new Configuration();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, "/my/path/to/staging");
  final String jobFile =
      MRApps.getJobFile(conf, "dummy-user", new JobID("dummy-job", 12345));
  assertNotNull("getJobFile results in null.", jobFile);
  assertEquals("jobFile with specified user is not as expected.",
      "/my/path/to/staging/dummy-user/.staging/job_dummy-job_12345/job.xml",
      jobFile);
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * MAPREDUCE_JOB_USER_CLASSPATH_FIRST=true must place the job artifacts
 * (job.jar, classes/, lib/*) at the very front of the generated CLASSPATH.
 */
@Test(timeout=120000) public void testSetClasspathWithUserPrecendence(){
  Configuration conf = new Configuration();
  conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, true);
  // Typed map instead of the raw Map (env var name -> value).
  Map<String, String> env = new HashMap<String, String>();
  try {
    MRApps.setClasspath(env, conf);
  }
  catch (Exception e) {
    // Include the cause: the original bare fail() swallowed the actual
    // exception, hiding the reason from the test report.
    fail("Got exception while setting classpath: " + e);
  }
  String envStr = env.get("CLASSPATH");
  String expectedClasspath = StringUtils.join(
      ApplicationConstants.CLASS_PATH_SEPARATOR,
      Arrays.asList(ApplicationConstants.Environment.PWD.$$(),
          "job.jar/job.jar", "job.jar/classes/", "job.jar/lib/*",
          ApplicationConstants.Environment.PWD.$$() + "/*"));
  // With user precedence the job artifacts must be the classpath prefix.
  assertTrue("MAPREDUCE_JOB_USER_CLASSPATH_FIRST set, but not taking effect!",
      envStr.startsWith(expectedClasspath));
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * MAPREDUCE_JOB_CLASSLOADER=true must keep job.jar (and PWD) out of the
 * system CLASSPATH and instead route the job artifacts to APP_CLASSPATH,
 * which the isolated job classloader consumes.
 */
@Test(timeout=120000) public void testSetClasspathWithJobClassloader() throws IOException {
  Configuration conf = new Configuration();
  conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, true);
  // Typed map instead of the raw Map (env var name -> value).
  Map<String, String> env = new HashMap<String, String>();
  MRApps.setClasspath(env, conf);
  String cp = env.get("CLASSPATH");
  String appCp = env.get("APP_CLASSPATH");
  assertFalse("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is in the" + " classpath!",
      cp.contains("jar" + ApplicationConstants.CLASS_PATH_SEPARATOR + "job"));
  assertFalse("MAPREDUCE_JOB_CLASSLOADER true, but PWD is in the classpath!",
      cp.contains("PWD"));
  String expectedAppClasspath = StringUtils.join(
      ApplicationConstants.CLASS_PATH_SEPARATOR,
      Arrays.asList(ApplicationConstants.Environment.PWD.$$(),
          "job.jar/job.jar", "job.jar/classes/", "job.jar/lib/*",
          ApplicationConstants.Environment.PWD.$$() + "/*"));
  assertEquals("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is not in the app"
      + " classpath!", expectedAppClasspath, appCp);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * getSystemPropertiesToLog: a whitespace-only configuration value yields
 * null, while a comma-separated list yields a string containing exactly the
 * listed property names and nothing else.
 */
@Test public void testLogSystemProperties() throws Exception {
  final Configuration conf = new Configuration();
  // A blank list means "log no system properties".
  conf.set(MRJobConfig.MAPREDUCE_JVM_SYSTEM_PROPERTIES_TO_LOG, " ");
  String propsToLog = MRApps.getSystemPropertiesToLog(conf);
  assertNull(propsToLog);
  final String classpathProp = "java.class.path";
  final String osProp = "os.name";
  final String versionProp = "java.version";
  // Request two properties; a third one must not appear in the result.
  conf.set(MRJobConfig.MAPREDUCE_JVM_SYSTEM_PROPERTIES_TO_LOG,
      classpathProp + ", " + osProp);
  propsToLog = MRApps.getSystemPropertiesToLog(conf);
  assertNotNull(propsToLog);
  assertTrue(propsToLog.contains(classpathProp));
  assertTrue(propsToLog.contains(osProp));
  assertFalse(propsToLog.contains(versionProp));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * setupDistributedCache must translate the configured cache archive and
 * cache file (with their timestamp/size/visibility settings) into two
 * LocalResources keyed by link name, with matching size, timestamp and
 * resource type (ARCHIVE vs FILE).
 */
@SuppressWarnings("deprecation") @Test(timeout=30000) public void testSetupDistributedCache() throws Exception {
Configuration conf=new Configuration();
// Route mockfs:// URIs to the mock FileSystem so no real I/O happens.
conf.setClass("fs.mockfs.impl",MockFileSystem.class,FileSystem.class);
URI mockUri=URI.create("mockfs://mock/");
FileSystem mockFs=((FilterFileSystem)FileSystem.get(mockUri,conf)).getRawFileSystem();
URI archive=new URI("mockfs://mock/tmp/something.zip");
Path archivePath=new Path(archive);
// The file URI carries a fragment ("#something") that becomes its link name.
URI file=new URI("mockfs://mock/tmp/something.txt#something");
Path filePath=new Path(file);
when(mockFs.resolvePath(archivePath)).thenReturn(archivePath);
when(mockFs.resolvePath(filePath)).thenReturn(filePath);
// Register the archive with timestamp/size/visibility metadata.
DistributedCache.addCacheArchive(archive,conf);
conf.set(MRJobConfig.CACHE_ARCHIVES_TIMESTAMPS,"10");
conf.set(MRJobConfig.CACHE_ARCHIVES_SIZES,"10");
conf.set(MRJobConfig.CACHE_ARCHIVES_VISIBILITIES,"true");
// Register the cache file with its own metadata.
DistributedCache.addCacheFile(file,conf);
conf.set(MRJobConfig.CACHE_FILE_TIMESTAMPS,"11");
conf.set(MRJobConfig.CACHE_FILES_SIZES,"11");
conf.set(MRJobConfig.CACHE_FILE_VISIBILITIES,"true");
Map localResources=new HashMap();
MRApps.setupDistributedCache(conf,localResources);
assertEquals(2,localResources.size());
// Archive entry: keyed by its file name, typed ARCHIVE.
LocalResource lr=localResources.get("something.zip");
assertNotNull(lr);
assertEquals(10l,lr.getSize());
assertEquals(10l,lr.getTimestamp());
assertEquals(LocalResourceType.ARCHIVE,lr.getType());
// File entry: keyed by its URI fragment, typed FILE.
lr=localResources.get("something");
assertNotNull(lr);
assertEquals(11l,lr.getSize());
assertEquals(11l,lr.getTimestamp());
assertEquals(LocalResourceType.FILE,lr.getType());
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * MAPREDUCE_JOB_USER_CLASSPATH_FIRST=false must still include the job
 * artifacts on the generated CLASSPATH, but NOT as its prefix.
 */
@Test(timeout=120000) public void testSetClasspathWithNoUserPrecendence(){
  Configuration conf = new Configuration();
  conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, false);
  // Typed map instead of the raw Map (env var name -> value).
  Map<String, String> env = new HashMap<String, String>();
  try {
    MRApps.setClasspath(env, conf);
  }
  catch (Exception e) {
    // Include the cause: the original bare fail() swallowed the actual
    // exception, hiding the reason from the test report.
    fail("Got exception while setting classpath: " + e);
  }
  String envStr = env.get("CLASSPATH");
  String expectedClasspath = StringUtils.join(
      ApplicationConstants.CLASS_PATH_SEPARATOR,
      Arrays.asList("job.jar/job.jar", "job.jar/classes/", "job.jar/lib/*",
          ApplicationConstants.Environment.PWD.$$() + "/*"));
  // Without user precedence the job artifacts appear, but not first.
  assertTrue("MAPREDUCE_JOB_USER_CLASSPATH_FIRST false, and job.jar is not in"
      + " the classpath!", envStr.contains(expectedClasspath));
  assertFalse("MAPREDUCE_JOB_USER_CLASSPATH_FIRST false, but taking effect!",
      envStr.startsWith(expectedClasspath));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Default setClasspath behaviour: the generated CLASSPATH must start with
 * PWD and contain both the YARN application classpath and the MapReduce
 * application classpath (commas normalized to the platform separator).
 */
@Test(timeout=120000) public void testSetClasspath() throws IOException {
  Configuration conf = new Configuration();
  conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
  Job job = Job.getInstance(conf);
  // Typed map instead of the raw Map (env var name -> value).
  Map<String, String> environment = new HashMap<String, String>();
  MRApps.setClasspath(environment, job.getConfiguration());
  assertTrue(environment.get("CLASSPATH").startsWith(
      ApplicationConstants.Environment.PWD.$$()
          + ApplicationConstants.CLASS_PATH_SEPARATOR));
  String yarnAppClasspath = job.getConfiguration().get(
      YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      StringUtils.join(",",
          YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH));
  if (yarnAppClasspath != null) {
    // Comma-separated config entries become separator-joined path entries.
    yarnAppClasspath = yarnAppClasspath.replaceAll(",\\s*",
        ApplicationConstants.CLASS_PATH_SEPARATOR).trim();
  }
  assertTrue(environment.get("CLASSPATH").contains(yarnAppClasspath));
  String mrAppClasspath = job.getConfiguration().get(
      MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH,
      MRJobConfig.DEFAULT_MAPREDUCE_CROSS_PLATFORM_APPLICATION_CLASSPATH);
  if (mrAppClasspath != null) {
    mrAppClasspath = mrAppClasspath.replaceAll(",\\s*",
        ApplicationConstants.CLASS_PATH_SEPARATOR).trim();
  }
  assertTrue(environment.get("CLASSPATH").contains(mrAppClasspath));
}
APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * MAPREDUCE_APPLICATION_FRAMEWORK_PATH handling:
 * 1) setting a framework path without a matching classpath entry must fail
 *    with an IllegalArgumentException naming the framework;
 * 2) once the framework appears on MAPREDUCE_APPLICATION_CLASSPATH, the
 *    generated CLASSPATH is PWD, framework, then the standard job entries;
 * 3) with user precedence enabled, the job entries move ahead of the
 *    framework.
 */
@Test(timeout=3000000) public void testSetClasspathWithFramework() throws IOException {
final String FRAMEWORK_NAME="some-framework-name";
// The fragment after '#' is the link name the framework is localized under.
final String FRAMEWORK_PATH="some-framework-path#" + FRAMEWORK_NAME;
Configuration conf=new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,true);
conf.set(MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH,FRAMEWORK_PATH);
Map env=new HashMap();
try {
MRApps.setClasspath(env,conf);
fail("Failed to catch framework path set without classpath change");
}
 catch ( IllegalArgumentException e) {
assertTrue("Unexpected IllegalArgumentException",e.getMessage().contains("Could not locate MapReduce framework name '" + FRAMEWORK_NAME + "'"));
}
env.clear();
// Now reference the framework from the application classpath; this makes
// the framework path valid.
final String FRAMEWORK_CLASSPATH=FRAMEWORK_NAME + "/*.jar";
conf.set(MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH,FRAMEWORK_CLASSPATH);
MRApps.setClasspath(env,conf);
final String stdClasspath=StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,Arrays.asList("job.jar/job.jar","job.jar/classes/","job.jar/lib/*",ApplicationConstants.Environment.PWD.$$() + "/*"));
// Default ordering: PWD, framework, then standard job entries.
String expectedClasspath=StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,Arrays.asList(ApplicationConstants.Environment.PWD.$$(),FRAMEWORK_CLASSPATH,stdClasspath));
assertEquals("Incorrect classpath with framework and no user precedence",expectedClasspath,env.get("CLASSPATH"));
env.clear();
// User precedence flips the job entries ahead of the framework.
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST,true);
MRApps.setClasspath(env,conf);
expectedClasspath=StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,Arrays.asList(ApplicationConstants.Environment.PWD.$$(),stdClasspath,FRAMEWORK_CLASSPATH));
assertEquals("Incorrect classpath with framework and user precedence",expectedClasspath,env.get("CLASSPATH"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A classpath archive registered through CLASSPATH_ARCHIVES/CACHE_ARCHIVES
 * (localized under the link name "testTGZ") must appear on the generated
 * CLASSPATH, alongside the standard PWD prefix and the configured YARN
 * application classpath.
 */
@Test(timeout=120000) public void testSetClasspathWithArchives() throws IOException {
// Create a dummy one-byte archive file on the local filesystem.
File testTGZ=new File(testWorkDir,"test.tgz");
FileOutputStream out=new FileOutputStream(testTGZ);
out.write(0);
out.close();
Configuration conf=new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,true);
Job job=Job.getInstance(conf);
conf=job.getConfiguration();
String testTGZQualifiedPath=FileSystem.getLocal(conf).makeQualified(new Path(testTGZ.getAbsolutePath())).toString();
conf.set(MRJobConfig.CLASSPATH_ARCHIVES,testTGZQualifiedPath);
// The '#testTGZ' fragment is the link name the archive is localized under.
conf.set(MRJobConfig.CACHE_ARCHIVES,testTGZQualifiedPath + "#testTGZ");
Map environment=new HashMap();
MRApps.setClasspath(environment,conf);
assertTrue(environment.get("CLASSPATH").startsWith(ApplicationConstants.Environment.PWD.$$() + ApplicationConstants.CLASS_PATH_SEPARATOR));
String confClasspath=job.getConfiguration().get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,StringUtils.join(",",YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH));
if (confClasspath != null) {
// Comma-separated config entries become separator-joined path entries.
confClasspath=confClasspath.replaceAll(",\\s*",ApplicationConstants.CLASS_PATH_SEPARATOR).trim();
}
assertTrue(environment.get("CLASSPATH").contains(confClasspath));
// The archive's link name must have been appended to the classpath.
assertTrue(environment.get("CLASSPATH").contains("testTGZ"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A MetricsSourceAdapter must expose the same counter both through
 * getMetrics() snapshots and through its JMX attribute view ("C1"):
 * 0 before any increment, 1 after one increment on the annotated source.
 */
@Test public void testGetMetricsAndJmx() throws Exception {
TestSource source=new TestSource("test");
MetricsSourceBuilder sb=MetricsAnnotations.newSourceBuilder(source);
final MetricsSource s=sb.build();
List injectedTags=new ArrayList();
MetricsSourceAdapter sa=new MetricsSourceAdapter("test","test","test desc",s,injectedTags,null,null,1,false);
MetricsCollectorImpl builder=new MetricsCollectorImpl();
// First snapshot: counter should still be at its initial value 0.
Iterable metricsRecords=sa.getMetrics(builder,true);
MetricsRecordImpl metricsRecord=metricsRecords.iterator().next();
assertEquals(0L,metricsRecord.metrics().iterator().next().value().longValue());
// Brief pause before reading the JMX attribute — presumably to let the
// adapter's cached attribute view refresh; verify against the adapter impl.
Thread.sleep(100);
assertEquals(0L,(Number)sa.getAttribute("C1"));
source.incrementCnt();
// Second snapshot after one increment: both views must report 1.
builder=new MetricsCollectorImpl();
metricsRecords=sa.getMetrics(builder,true);
metricsRecord=metricsRecords.iterator().next();
assertTrue(metricsRecord.metrics().iterator().hasNext());
Thread.sleep(100);
assertEquals(1L,(Number)sa.getAttribute("C1"));
}
APIUtilityVerifier IdentityVerifier
/**
 * A "hybrid" annotated metrics object (one that is itself a MetricsSource
 * and also declares per-record annotations) must be returned as-is by
 * makeSource, and a collection must emit its "foo" and "bar" records plus
 * the default class-named record with the expected counters, gauges and
 * context tags.
 */
@Test public void testHybrid(){
HybridMetrics metrics=new HybridMetrics();
MetricsSource source=MetricsAnnotations.makeSource(metrics);
// A class that already implements MetricsSource is not wrapped.
assertSame(metrics,source);
metrics.C0.incr();
MetricsRecordBuilder rb=getMetrics(source);
MetricsCollector collector=rb.parent();
// All three records must have been added to the collector.
verify(collector).addRecord("foo");
verify(collector).addRecord("bar");
verify(collector).addRecord(info("HybridMetrics","HybridMetrics"));
// Each record carries its own context and metrics.
verify(rb).setContext("foocontext");
verify(rb).addCounter(info("C1","C1 desc"),1);
verify(rb).setContext("barcontext");
verify(rb).addGauge(info("G1","G1 desc"),1);
verify(rb).add(tag(MsInfo.Context,"hybrid"));
verify(rb).addCounter(info("C0","C0 desc"),1);
verify(rb).addGauge(info("G0","G0"),0);
}
APIUtilityVerifier BooleanVerifier
/**
 * End-to-end FileSink test: configures a metrics system whose only sink is
 * a FileSink scoped to context "test1", publishes one snapshot, then reads
 * the output file back and matches it against the expected record layout.
 * Tag/metric ordering within a record is not deterministic, so the pattern
 * accepts both orders.
 */
@Test(timeout=6000) public void testFileSink() throws IOException {
outFile=getTestTempFile("test-file-sink-",".out");
final String outPath=outFile.getAbsolutePath();
// Sink config: FileSink writing to outPath, filtering on context "test1".
new ConfigBuilder().add("*.period",10000).add("test.sink.mysink0.class",FileSink.class.getName()).add("test.sink.mysink0.filename",outPath).add("test.sink.mysink0.context","test1").save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms=new MetricsSystemImpl("test");
ms.start();
// MyMetrics1 is in context "test1" (captured); MyMetrics2 is not.
final MyMetrics1 mm1=new MyMetrics1().registerWith(ms);
new MyMetrics2().registerWith(ms);
mm1.testMetric1.incr();
mm1.testMetric2.incr(2);
// Force a synchronous publish, then shut the system down so the file is
// fully flushed before we read it back.
ms.publishMetricsNow();
ms.stop();
ms.shutdown();
InputStream is=null;
ByteArrayOutputStream baos=null;
String outFileContent=null;
try {
is=new FileInputStream(outFile);
baos=new ByteArrayOutputStream((int)outFile.length());
IOUtils.copyBytes(is,baos,1024,true);
outFileContent=new String(baos.toByteArray(),"UTF-8");
}
  finally {
IOUtils.cleanup(null,baos,is);
}
// Two records expected, each on its own line: testRecord1 with both tags
// and both metrics (either order), testRecord2 with its single tag.
Pattern expectedContentPattern=Pattern.compile("^\\d+\\s+test1.testRecord1:\\s+Context=test1,\\s+" + "(testTag1=testTagValue1,\\s+testTag2=testTagValue2|testTag2=testTagValue2,\\s+testTag1=testTagValue1)," + "\\s+Hostname=.*,\\s+(testMetric1=1,\\s+testMetric2=2|testMetric2=2,\\s+testMetric1=1)"+ "$[\\n\\r]*^\\d+\\s+test1.testRecord2:\\s+Context=test1,"+ "\\s+testTag22=testTagValue22,\\s+Hostname=.*$[\\n\\r]*",Pattern.MULTILINE);
assertTrue(expectedContentPattern.matcher(outFileContent).matches());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier
/**
 * Correctness test that checks that absolute error of the estimate is within
 * specified error bounds for some randomly permuted streams of items.
 */
@Test public void testQuantileError() throws IOException {
  final int count = 100000;
  // Fixed seed so the permutations (and hence the test) are reproducible.
  Random r = new Random(0xDEADDEAD);
  Long[] values = new Long[count];
  for (int i = 0; i < count; i++) {
    values[i] = (long) (i + 1);
  }
  // Repeat over several random permutations of the same value stream.
  for (int i = 0; i < 10; i++) {
    System.out.println("Starting run " + i);
    Collections.shuffle(Arrays.asList(values), r);
    estimator.clear();
    for (int j = 0; j < count; j++) {
      estimator.insert(values[j]);
    }
    // Typed snapshot instead of the raw Map: quantile -> estimated value.
    Map<Quantile, Long> snapshot = estimator.snapshot();
    for (Quantile q : quantiles) {
      long actual = (long) (q.quantile * count);
      long error = (long) (q.error * count);
      long estimate = snapshot.get(q);
      System.out.println(String.format(
          "Expected %d with error %d, estimated %d", actual, error, estimate));
      // The estimate must fall within the quantile's configured error band.
      assertTrue(estimate <= actual + error);
      assertTrue(estimate >= actual - error);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Logs a keytab-backed principal into the MiniKdc twice — once with the
 * client JAAS configuration and once with the server configuration — and
 * checks that each login yields exactly one KerberosPrincipal with the
 * fully-qualified name principal@REALM.
 */
@Test public void testKerberosLogin() throws Exception {
MiniKdc kdc=getKdc();
File workDir=getWorkDir();
LoginContext loginContext=null;
try {
String principal="foo";
// Create the principal and export its key to a keytab in the work dir.
File keytab=new File(workDir,"foo.keytab");
kdc.createPrincipal(keytab,principal);
Set principals=new HashSet();
principals.add(new KerberosPrincipal(principal));
// Client-side login using the keytab.
Subject subject=new Subject(false,principals,new HashSet(),new HashSet());
loginContext=new LoginContext("",subject,null,KerberosConfiguration.createClientConfig(principal,keytab));
loginContext.login();
subject=loginContext.getSubject();
assertEquals(1,subject.getPrincipals().size());
Assert.assertEquals(KerberosPrincipal.class,subject.getPrincipals().iterator().next().getClass());
Assert.assertEquals(principal + "@" + kdc.getRealm(),subject.getPrincipals().iterator().next().getName());
loginContext.logout();
// Server-side login with a fresh Subject but the same keytab.
subject=new Subject(false,principals,new HashSet(),new HashSet());
loginContext=new LoginContext("",subject,null,KerberosConfiguration.createServerConfig(principal,keytab));
loginContext.login();
subject=loginContext.getSubject();
Assert.assertEquals(1,subject.getPrincipals().size());
Assert.assertEquals(KerberosPrincipal.class,subject.getPrincipals().iterator().next().getClass());
Assert.assertEquals(principal + "@" + kdc.getRealm(),subject.getPrincipals().iterator().next().getName());
loginContext.logout();
}
  finally {
// NOTE(review): on the success path the context was already logged out at
// the end of the try block, so this logs out a second time — presumably
// harmless, but worth confirming against the JAAS LoginContext contract.
if (loginContext != null) {
loginContext.logout();
}
}
}
APIUtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test that repeated calls to getting the local host are fairly fast, and
 * hence that caching is being used
 * @throws Exception if hostname lookups fail
 */
@Test public void testGetLocalHostIsFast() throws Exception {
  final String first = DNS.getDefaultHost(DEFAULT);
  assertNotNull(first);
  final String second = DNS.getDefaultHost(DEFAULT);
  // Time only the third lookup; with caching it should be near-instant.
  final long start = Time.now();
  final String third = DNS.getDefaultHost(DEFAULT);
  final long elapsed = Time.now() - start;
  assertEquals(third, second);
  assertEquals(second, first);
  assertTrue("Took too long to determine local host - caching is not working",
      elapsed < 20000);
}
APIUtilityVerifier EqualityVerifier
/**
 * Test the "default" IP addresses is the local IP addr
 */
@Test public void testGetIPWithDefault() throws Exception {
  final String[] ips = DNS.getIPs(DEFAULT);
  assertEquals("Should only return 1 default IP", 1, ips.length);
  // The single returned IP must be the local address.
  assertEquals(getLocalIPAddr().getHostAddress(), ips[0].toString());
  final String defaultIp = DNS.getDefaultIP(DEFAULT);
  // getDefaultIP must agree with the first entry of getIPs.
  assertEquals(defaultIp, ips[0].toString());
}
APIUtilityVerifier NullVerifier
/**
 * Test that asking for the default hostname works
 * @throws Exception if hostname lookups fail
 */
@Test public void testGetLocalHost() throws Exception {
  final String localHostName = DNS.getDefaultHost(DEFAULT);
  assertNotNull(localHostName);
}
APIUtilityVerifier IterativeVerifier BranchVerifier BooleanVerifier
/**
 * Test for {@link NetUtils#isLocalAddress(java.net.InetAddress)}
 */
@Test public void testIsLocalAddress() throws Exception {
  // The local host address must always be classified as local.
  assertTrue(NetUtils.isLocalAddress(InetAddress.getLocalHost()));
  // Every address bound to a local interface must be classified as local.
  // Typed Enumerations instead of raw ones so nextElement() needs no cast.
  Enumeration<NetworkInterface> interfaces =
      NetworkInterface.getNetworkInterfaces();
  if (interfaces != null) {
    while (interfaces.hasMoreElements()) {
      NetworkInterface i = interfaces.nextElement();
      Enumeration<InetAddress> addrs = i.getInetAddresses();
      if (addrs == null) {
        continue;
      }
      while (addrs.hasMoreElements()) {
        InetAddress addr = addrs.nextElement();
        assertTrue(NetUtils.isLocalAddress(addr));
      }
    }
  }
  // A well-known external address must not be classified as local.
  assertFalse(NetUtils.isLocalAddress(InetAddress.getByName("8.8.8.8")));
}
APIUtilityVerifier EqualityVerifier
/**
 * getConnectAddress() must preserve the host name of a resolvable address
 * and rewrite a wildcard address to the local host's name.
 */
@Test public void testGetConnectAddress() throws IOException {
  NetUtils.addStaticResolution("host","127.0.0.1");
  // A statically resolvable host keeps its name through getConnectAddress().
  final InetSocketAddress resolved=NetUtils.createSocketAddrForHost("host",1);
  final InetSocketAddress resolvedConnect=NetUtils.getConnectAddress(resolved);
  assertEquals(resolved.getHostName(),resolvedConnect.getHostName());
  // A wildcard (port-only) address is rewritten to the local host name.
  final InetSocketAddress wildcard=new InetSocketAddress(1);
  final InetSocketAddress wildcardConnect=NetUtils.getConnectAddress(wildcard);
  assertEquals(InetAddress.getLocalHost().getHostName(),wildcardConnect.getHostName());
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test for {@link NetUtils#normalizeHostNames}: IP literals pass through
 * unchanged, resolvable host names are rewritten to their IP address, and
 * unresolvable names are returned as-is.
 * NOTE(review): relies on live DNS — "1.kanyezone.appspot.com" resolving
 * and "UnknownHost123" not resolving — so it may be flaky offline.
 */
@Test public void testNormalizeHostName(){
List hosts=Arrays.asList(new String[]{"127.0.0.1","localhost","1.kanyezone.appspot.com","UnknownHost123"});
List normalizedHosts=NetUtils.normalizeHostNames(hosts);
// An IP literal is returned unchanged.
assertEquals(normalizedHosts.get(0),hosts.get(0));
// "localhost" resolves, so it is replaced by its IP — 127.0.0.1.
assertFalse(normalizedHosts.get(1).equals(hosts.get(1)));
assertEquals(normalizedHosts.get(1),hosts.get(0));
// A resolvable FQDN is replaced by some IP address (value not pinned).
assertFalse(normalizedHosts.get(2).equals(hosts.get(2)));
// An unresolvable name is passed through untouched.
assertEquals(normalizedHosts.get(3),hosts.get(3));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * A datanode whose rack resolution is topologically inconsistent (different
 * path depth from its peer) must not have that bad resolution cached:
 * after the static mapping is corrected and the node restarted, it must
 * register with the corrected network location.
 */
@Test(timeout=180000) public void testInvalidNetworkTopologiesNotCachedInHdfs() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
try {
// Deliberately mismatched rack depths: "/a/b" (2 levels) vs "/c" (1 level),
// so one of the two datanodes cannot join the topology.
String racks[]={"/a/b","/c"};
String hosts[]={"foo1.example.com","foo2.example.com"};
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).racks(racks).hosts(hosts).build();
cluster.waitActive();
NamenodeProtocols nn=cluster.getNameNodeRpc();
Assert.assertNotNull(nn);
DatanodeInfo[] info;
// Wait until exactly one datanode is live; both live at once would mean
// the invalid topology was (incorrectly) accepted.
while (true) {
info=nn.getDatanodeReport(DatanodeReportType.LIVE);
Assert.assertFalse(info.length == 2);
if (info.length == 1) {
break;
}
Thread.sleep(1000);
}
// Remap the failed node onto a rack of valid depth, then restart it.
int validIdx=info[0].getHostName().equals(hosts[0]) ? 0 : 1;
int invalidIdx=validIdx == 1 ? 0 : 1;
StaticMapping.addNodeToRack(hosts[invalidIdx],racks[validIdx]);
LOG.info("datanode " + validIdx + " came up with network location "+ info[0].getNetworkLocation());
cluster.restartDataNode(invalidIdx);
Thread.sleep(5000);
// Poll until both datanodes are reported live again.
while (true) {
info=nn.getDatanodeReport(DatanodeReportType.LIVE);
if (info.length == 2) {
break;
}
if (info.length == 0) {
LOG.info("got no valid DNs");
}
else if (info.length == 1) {
LOG.info("got one valid DN: " + info[0].getHostName() + " (at "+ info[0].getNetworkLocation()+ ")");
}
Thread.sleep(1000);
}
// Both nodes must now share the same (valid) network location, proving
// the earlier bad resolution was not cached.
Assert.assertEquals(info[0].getNetworkLocation(),info[1].getNetworkLocation());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier BooleanVerifier
/**
 * This test checks that chooseRandom works for an excluded node: over 100
 * random picks with dataNodes[0] excluded, every other node is selected at
 * least once and the excluded node never is.
 */
@Test public void testChooseRandomExcludedNode(){
// The "~" prefix means "everything except this path".
String scope="~" + NodeBase.getPath(dataNodes[0]);
Map frequency=pickNodesAtRandom(100,scope);
for ( Node key : dataNodes) {
// Either the node was picked at least once, or it is the excluded node.
assertTrue(frequency.get(key) > 0 || key == dataNodes[0]);
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This test checks that chooseRandom works for an excluded rack: nodes
 * under /d2 must never be chosen, and every other node must be chosen at
 * least once over 100 picks.
 */
@Test public void testChooseRandomExcludedRack(){
// Exclude the whole /d2 rack from the random selection scope.
Map frequency=pickNodesAtRandom(100,"~" + "/d2");
for (int j=0; j < dataNodes.length; j++) {
int freq=frequency.get(dataNodes[j]);
if (dataNodes[j].getNetworkLocation().startsWith("/d2")) {
// Excluded rack: must never be selected.
assertEquals(0,freq);
}
else {
// Every node outside the excluded rack should appear at least once.
assertTrue(freq > 0);
}
}
}
APIUtilityVerifier BooleanVerifier
/**
 * This test checks that chooseRandom works for an excluded node: over 100
 * random picks with dataNodes[0] excluded, every other node is selected at
 * least once and the excluded node never is.
 * NOTE(review): the original javadoc here described a replica-placement
 * test with a faulty 6th node, which does not match this body; this method
 * also duplicates the earlier testChooseRandomExcludedNode — confirm which
 * test was intended.
 */
@Test public void testChooseRandomExcludedNode(){
// The "~" prefix means "everything except this path".
String scope="~" + NodeBase.getPath(dataNodes[0]);
Map frequency=pickNodesAtRandom(100,scope);
for ( Node key : dataNodes) {
// Either the node was picked at least once, or it is the excluded node.
assertTrue(frequency.get(key) > 0 || key == dataNodes[0]);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Configuring a script filename makes the mapping multi-switch; replacing
 * the configuration with one that has no filename reverts it to
 * single-switch.
 */
@Test public void testFilenameMeansMultiSwitch() throws Throwable {
  final Configuration withScript=new Configuration();
  withScript.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY,"any-filename");
  final ScriptBasedMapping mapping=createMapping(withScript);
  assertFalse("Expected to be multi switch",mapping.isSingleSwitch());
  // Swap in an empty configuration: no script means single-switch.
  mapping.setConf(new Configuration());
  assertTrue("Expected to be single switch",mapping.isSingleSwitch());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * With no script filename configured, the mapping reports itself as
 * single-switch both directly and via the static helper.
 */
@Test public void testNoFilenameMeansSingleSwitch() throws Throwable {
  final Configuration emptyConf=new Configuration();
  final ScriptBasedMapping mapping=createMapping(emptyConf);
  assertTrue("Expected to be single switch",mapping.isSingleSwitch());
  assertTrue("Expected to be single switch",AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * With fewer than the minimum allowable script arguments configured, the
 * mapping can never invoke its script, so resolve() must return null.
 */
@Test public void testNoArgsMeansNoResult(){
  Configuration conf=new Configuration();
  // Deliberately one below the minimum so the script cannot be invoked.
  conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY,ScriptBasedMapping.MIN_ALLOWABLE_ARGS - 1);
  // The original set this key twice with the identical value; the
  // redundant duplicate statement has been removed.
  conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY,"any-filename");
  ScriptBasedMapping mapping=createMapping(conf);
  List names=new ArrayList();
  names.add("some.machine.name");
  names.add("other.machine.name");
  List result=mapping.resolve(names);
  assertNull("Expected an empty list",result);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A mapping built from an empty configuration (no script filename) must be
 * single-switch, as seen both from the instance and from the
 * AbstractDNSToSwitchMapping helper.
 */
@Test public void testNoFilenameMeansSingleSwitch() throws Throwable {
  final ScriptBasedMapping mapping=createMapping(new Configuration());
  assertTrue("Expected to be single switch",mapping.isSingleSwitch());
  assertTrue("Expected to be single switch",AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * With unusable script settings, both resolve() and getDependency() on a
 * ScriptBasedMappingWithDependency are expected to return null.
 * NOTE(review): SCRIPT_ARG_COUNT_KEY is set twice — first to
 * MIN_ALLOWABLE_ARGS - 1, then to 10 — so the second call overrides the
 * first and the "too few args" setting never takes effect; confirm which
 * value this test actually intends.
 */
@Test public void testNoArgsMeansNoResult(){
Configuration conf=new Configuration();
conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY,ScriptBasedMapping.MIN_ALLOWABLE_ARGS - 1);
conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY,"any-filename-1");
conf.set(ScriptBasedMappingWithDependency.DEPENDENCY_SCRIPT_FILENAME_KEY,"any-filename-2");
conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY,10);
ScriptBasedMappingWithDependency mapping=createMapping(conf);
List names=new ArrayList();
names.add("some.machine.name");
names.add("other.machine.name");
List result=mapping.resolve(names);
assertNull("Expected an empty list for resolve",result);
result=mapping.getDependency("some.machine.name");
assertNull("Expected an empty list for getDependency",result);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A configured script filename puts the mapping into multi-switch mode;
 * re-applying an empty configuration restores single-switch mode.
 */
@Test public void testFilenameMeansMultiSwitch() throws Throwable {
  final Configuration scripted=new Configuration();
  scripted.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY,"any-filename");
  final ScriptBasedMapping mapping=createMapping(scripted);
  assertFalse("Expected to be multi switch",mapping.isSingleSwitch());
  // Reconfigure with no script filename: back to single-switch.
  mapping.setConf(new Configuration());
  assertTrue("Expected to be single switch",mapping.isSingleSwitch());
}
APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Exercises SocketInputStream/SocketOutputStream over an in-process Pipe:
 * data round-trips intact, doIO() is driven at the configured timeouts,
 * an interrupted blocking read raises InterruptedIOException without
 * closing the channels, and closing either stream closes its channel.
 */
@Test public void testSocketIOWithTimeout() throws Exception {
Pipe pipe=Pipe.open();
Pipe.SourceChannel source=pipe.source();
Pipe.SinkChannel sink=pipe.sink();
try {
final InputStream in=new SocketInputStream(source,TIMEOUT);
OutputStream out=new SocketOutputStream(sink,TIMEOUT);
byte[] writeBytes=TEST_STRING.getBytes();
byte[] readBytes=new byte[writeBytes.length];
// A byte with the high bit set checks that read() yields 0..255 rather
// than a sign-extended negative value (asserted below via "& 0xff").
byte byteWithHighBit=(byte)0x80;
out.write(writeBytes);
out.write(byteWithHighBit);
doIO(null,out,TIMEOUT);
in.read(readBytes);
assertTrue(Arrays.equals(writeBytes,readBytes));
assertEquals(byteWithHighBit & 0xff,in.read());
doIO(in,null,TIMEOUT);
// A changed timeout must be honoured by subsequent reads.
((SocketInputStream)in).setTimeout(TIMEOUT * 2);
doIO(in,null,TIMEOUT * 2);
// Timeout 0 blocks indefinitely; the reader below is woken by interrupt.
((SocketInputStream)in).setTimeout(0);
TestingThread thread=new TestingThread(ctx){
@Override public void doWork() throws Exception {
try {
in.read();
fail("Did not fail with interrupt");
}
catch ( InterruptedIOException ste) {
LOG.info("Got expection while reading as expected : " + ste.getMessage());
}
}
}
;
ctx.addThread(thread);
ctx.startThreads();
Thread.sleep(1000);
thread.interrupt();
ctx.stop();
// The interrupt must not have closed the underlying channels.
assertTrue(source.isOpen());
assertTrue(sink.isOpen());
// The "stream is closed" write check is skipped on Windows and PPC64 —
// presumably platform-specific interrupt semantics; see Shell flags.
if (!Shell.WINDOWS && !Shell.PPC_64) {
try {
out.write(1);
fail("Did not throw");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("stream is closed",ioe);
}
}
// Closing a stream must close its channel; the source then reports EOF.
out.close();
assertFalse(sink.isOpen());
assertEquals(-1,in.read());
in.close();
assertFalse(source.isOpen());
}
finally {
if (source != null) {
source.close();
}
if (sink != null) {
sink.close();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test setting some server options: halve the receive buffer, set a
 * receive timeout, verify both attributes read back as written, and check
 * that accept() with no client then fails with SocketTimeoutException.
 * @throws IOException
 */
@Test(timeout=180000) public void testServerOptions() throws Exception {
  final String socketPath=new File(sockDir.getDir(),"test_sock_server_options").getAbsolutePath();
  final DomainSocket listener=DomainSocket.bindAndListen(socketPath);
  try {
    // Shrink the receive buffer and confirm the attribute round-trips.
    final int originalBufSize=listener.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE);
    final int halvedBufSize=originalBufSize / 2;
    listener.setAttribute(DomainSocket.RECEIVE_BUFFER_SIZE,halvedBufSize);
    Assert.assertEquals(halvedBufSize,listener.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE));
    // Set a receive timeout and confirm it round-trips as well.
    final int timeoutMillis=1000;
    listener.setAttribute(DomainSocket.RECEIVE_TIMEOUT,timeoutMillis);
    Assert.assertEquals(timeoutMillis,listener.getAttribute(DomainSocket.RECEIVE_TIMEOUT));
    // No client ever connects, so accept() must hit the receive timeout.
    try {
      listener.accept();
      Assert.fail("expected the accept() to time out and fail");
    }
    catch ( SocketTimeoutException e) {
      GenericTestUtils.assertExceptionContains("accept(2) error: ",e);
    }
  }
  finally {
    listener.close();
    Assert.assertFalse(listener.isOpen());
  }
}
APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier PublicFieldVerifier HybridVerifier
/**
 * Test file descriptor passing over a domain socket: a server thread
 * accepts a connection, verifies the client's message, and replies with a
 * payload plus two open file descriptors; the client verifies the reply
 * and that both received descriptors are readable.
 * @throws IOException
 */
@Test(timeout=180000) public void testFdPassing() throws Exception {
final String TEST_PATH=new File(sockDir.getDir(),"test_sock").getAbsolutePath();
final byte clientMsg1[]=new byte[]{0x11,0x22,0x33,0x44,0x55,0x66};
final byte serverMsg1[]=new byte[]{0x31,0x30,0x32,0x34,0x31,0x33,0x44,0x1,0x1,0x1,0x1,0x1};
// Each thread deposits either a Success marker or the Throwable it hit.
final ArrayBlockingQueue threadResults=new ArrayBlockingQueue(2);
final DomainSocket serv=DomainSocket.bindAndListen(TEST_PATH);
final PassedFile passedFiles[]=new PassedFile[]{new PassedFile(1),new PassedFile(2)};
final FileDescriptor passedFds[]=new FileDescriptor[passedFiles.length];
for (int i=0; i < passedFiles.length; i++) {
passedFds[i]=passedFiles[i].getInputStream().getFD();
}
Thread serverThread=new Thread(){
public void run(){
DomainSocket conn=null;
try {
conn=serv.accept();
byte in1[]=new byte[clientMsg1.length];
InputStream connInputStream=conn.getInputStream();
IOUtils.readFully(connInputStream,in1,0,in1.length);
Assert.assertTrue(Arrays.equals(clientMsg1,in1));
DomainSocket domainConn=(DomainSocket)conn;
// Reply with serverMsg1 plus the two open file descriptors.
domainConn.sendFileDescriptors(passedFds,serverMsg1,0,serverMsg1.length);
conn.close();
}
catch ( Throwable e) {
threadResults.add(e);
Assert.fail(e.getMessage());
}
threadResults.add(new Success());
}
}
;
serverThread.start();
Thread clientThread=new Thread(){
public void run(){
try {
DomainSocket client=DomainSocket.connect(TEST_PATH);
OutputStream clientOutputStream=client.getOutputStream();
InputStream clientInputStream=client.getInputStream();
clientOutputStream.write(clientMsg1);
DomainSocket domainConn=(DomainSocket)client;
byte in1[]=new byte[serverMsg1.length];
FileInputStream recvFis[]=new FileInputStream[passedFds.length];
// Request one byte less than the full reply — presumably to exercise
// both the fd-receiving read and the plain follow-up read; the
// remainder is fetched with readFully below.
int r=domainConn.recvFileInputStreams(recvFis,in1,0,in1.length - 1);
Assert.assertTrue(r > 0);
IOUtils.readFully(clientInputStream,in1,r,in1.length - r);
Assert.assertTrue(Arrays.equals(serverMsg1,in1));
// Every descriptor must have arrived and must read back correctly.
for (int i=0; i < passedFds.length; i++) {
Assert.assertNotNull(recvFis[i]);
passedFiles[i].checkInputStream(recvFis[i]);
}
for ( FileInputStream fis : recvFis) {
fis.close();
}
client.close();
}
catch ( Throwable e) {
threadResults.add(e);
}
threadResults.add(new Success());
}
}
;
clientThread.start();
// Fail the test if either thread reported a Throwable instead of Success.
for (int i=0; i < 2; i++) {
Throwable t=threadResults.take();
if (!(t instanceof Success)) {
Assert.fail(t.getMessage() + ExceptionUtils.getStackTrace(t));
}
}
serverThread.join(120000);
clientThread.join(120000);
serv.close();
for ( PassedFile pf : passedFiles) {
pf.cleanup();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that we get a read result of -1 on EOF: the client connects and
 * closes without writing, so the server's first read must see EOF.
 * @throws IOException
 */
@Test(timeout=180000) public void testSocketReadEof() throws Exception {
  final String TEST_PATH=new File(sockDir.getDir(),"testSocketReadEof").getAbsolutePath();
  final DomainSocket serv=DomainSocket.bindAndListen(TEST_PATH);
  ExecutorService exeServ=Executors.newSingleThreadExecutor();
  // Server side: accept one connection and expect an immediate EOF.
  // (The original also zero-filled a 100-byte buffer that was never read
  // or written — dead code, removed.)
  Callable callable=new Callable(){
    public Void call(){
      DomainSocket conn;
      try {
        conn=serv.accept();
      }
      catch ( IOException e) {
        throw new RuntimeException("unexpected IOException",e);
      }
      try {
        // The peer closes without writing, so the first read sees EOF.
        Assert.assertEquals(-1,conn.getInputStream().read());
      }
      catch ( IOException e) {
        throw new RuntimeException("unexpected IOException",e);
      }
      return null;
    }
  }
  ;
  Future future=exeServ.submit(callable);
  DomainSocket conn=DomainSocket.connect(serv.getPath());
  Thread.sleep(50);
  conn.close();
  serv.close();
  // Propagates any assertion/RuntimeException raised in the server task.
  future.get(2,TimeUnit.MINUTES);
  // Release the worker thread now that the task has completed.
  exeServ.shutdown();
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies DomainSocket.shutdown(): a reader blocked on the peer of a
 * socketpair observes EOF (read() == -1, not an IOException) after
 * shutdown, having first received all three bytes that were written.
 */
@Test(timeout=180000) public void testShutdown() throws Exception {
final AtomicInteger bytesRead=new AtomicInteger(0);
final AtomicBoolean failed=new AtomicBoolean(false);
final DomainSocket[] socks=DomainSocket.socketpair();
// Reader loop: count bytes until EOF; flag any IOException as failure.
Runnable reader=new Runnable(){
@Override public void run(){
while (true) {
try {
int ret=socks[1].getInputStream().read();
if (ret == -1) return;
bytesRead.addAndGet(1);
}
catch ( IOException e) {
DomainSocket.LOG.error("reader error",e);
failed.set(true);
return;
}
}
}
}
;
Thread readerThread=new Thread(reader);
readerThread.start();
socks[0].getOutputStream().write(1);
socks[0].getOutputStream().write(2);
socks[0].getOutputStream().write(3);
// The reader should still be alive, blocked waiting for more data.
Assert.assertTrue(readerThread.isAlive());
socks[0].shutdown();
readerThread.join();
// shutdown() must surface as EOF, not as an IOException.
Assert.assertFalse(failed.get());
Assert.assertEquals(3,bytesRead.get());
IOUtils.cleanup(null,socks);
}
APIUtilityVerifier EqualityVerifier
/**
 * Verifies IdUserGroup.parseStaticMap(): uid/gid lines — including
 * tab-separated fields, blank lines, and '#' comments in various
 * positions — are parsed into the expected mappings.
 */
@Test public void testStaticMapParsing() throws IOException {
  File tempStaticMapFile=File.createTempFile("nfs-",".map");
  // Don't leave the temp file behind if an assertion fails before cleanup.
  tempStaticMapFile.deleteOnExit();
  final String staticMapFileContents="uid 10 100\n" + "gid 10 200\n" + "uid 11 201 # comment at the end of a line\n"+ "uid 12 301\n"+ "# Comment at the beginning of a line\n"+ " # Comment that starts late in the line\n"+ "uid 10000 10001# line without whitespace before comment\n"+ "uid 13 302\n"+ "gid\t11\t201\n"+ "\n"+ "gid 12 202";
  OutputStream out=new FileOutputStream(tempStaticMapFile);
  try {
    out.write(staticMapFileContents.getBytes());
  }
  finally {
    // Release the file handle even if the write fails.
    out.close();
  }
  StaticMapping parsedMap=IdUserGroup.parseStaticMap(tempStaticMapFile);
  assertEquals(10,(int)parsedMap.uidMapping.get(100));
  assertEquals(11,(int)parsedMap.uidMapping.get(201));
  assertEquals(12,(int)parsedMap.uidMapping.get(301));
  assertEquals(13,(int)parsedMap.uidMapping.get(302));
  assertEquals(10,(int)parsedMap.gidMapping.get(200));
  assertEquals(11,(int)parsedMap.gidMapping.get(201));
  assertEquals(12,(int)parsedMap.gidMapping.get(202));
  assertEquals(10000,(int)parsedMap.uidMapping.get(10001));
  // An id with no explicit entry is expected to map to itself —
  // presumably the mapping passes through unmapped keys; confirm in
  // IdUserGroup.parseStaticMap.
  assertEquals(1000,(int)parsedMap.uidMapping.get(1000));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises RpcCallCache eviction: with capacity 10 and 20 distinct
 * clients inserted, the size is capped at 10, iteration yields the 10 most
 * recent entries in insertion order, and re-adding a cached entry returns
 * it as in-progress rather than creating a new one.
 */
@Test public void testCacheFunctionality() throws UnknownHostException {
RpcCallCache cache=new RpcCallCache("Test",10);
int size=0;
for (int clientId=0; clientId < 20; clientId++) {
InetAddress clientIp=InetAddress.getByName("1.1.1." + clientId);
System.out.println("Adding " + clientIp);
cache.checkOrAddToCache(clientIp,0);
// Expected size grows with each insertion but never exceeds capacity 10.
size=Math.min(++size,10);
System.out.println("Cache size " + cache.size());
assertEquals(size,cache.size());
// Oldest entry expected to still be present after any eviction.
int startEntry=Math.max(clientId - 10 + 1,0);
Iterator> iterator=cache.iterator();
for (int i=0; i < size; i++) {
ClientRequest key=iterator.next().getKey();
System.out.println("Entry " + key.getClientId());
assertEquals(InetAddress.getByName("1.1.1." + (startEntry + i)),key.getClientId());
}
// Re-looking-up cached entries must return the existing entries,
// still marked in-progress and not completed.
for (int i=0; i < size; i++) {
CacheEntry e=cache.checkOrAddToCache(InetAddress.getByName("1.1.1." + (startEntry + i)),0);
assertNotNull(e);
assertTrue(e.isInProgress());
assertFalse(e.isCompleted());
}
}
}
APIUtilityVerifier BooleanVerifier
/**
 * An idle client connected to the portmap TCP server should be
 * disconnected by the server, observed as EOF on the client's input
 * stream.
 */
@Test(timeout=1000) public void testIdle() throws InterruptedException, IOException {
  final Socket socket=new Socket();
  try {
    socket.connect(pm.getTcpServerLocalAddress());
    // Poll briefly until the connection is established (bounded retries).
    int attempts=0;
    while (!socket.isConnected() && attempts < RETRY_TIMES) {
      ++attempts;
      Thread.sleep(SHORT_TIMEOUT_MILLISECONDS);
    }
    Assert.assertTrue("Failed to connect to the server",socket.isConnected() && attempts < RETRY_TIMES);
    // EOF (-1) means the server dropped the idle connection.
    final int firstByte=socket.getInputStream().read();
    Assert.assertTrue("The server failed to disconnect",firstByte == -1);
  }
  finally {
    socket.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips a Credentials object — two service tokens plus ten HMAC
 * secret keys — through write()/readFields() via a temp file and verifies
 * nothing is lost or altered.
 */
@SuppressWarnings("unchecked") @Test public void testReadWriteStorage() throws IOException, NoSuchAlgorithmException {
Credentials ts=new Credentials();
Token token1=new Token();
Token token2=new Token();
Text service1=new Text("service1");
Text service2=new Text("service2");
Collection services=new ArrayList();
services.add(service1);
services.add(service2);
token1.setService(service1);
token2.setService(service2);
ts.addToken(new Text("sometoken1"),token1);
ts.addToken(new Text("sometoken2"),token2);
// Add ten generated secret keys, mirrored in a local map for later
// comparison against the deserialized credentials.
final KeyGenerator kg=KeyGenerator.getInstance(DEFAULT_HMAC_ALGORITHM);
String alias="alias";
Map m=new HashMap(10);
for (int i=0; i < 10; i++) {
Key key=kg.generateKey();
m.put(new Text(alias + i),key.getEncoded());
ts.addSecretKey(new Text(alias + i),key.getEncoded());
}
// Serialize to a temp file, then deserialize into a fresh Credentials.
File tmpFileName=new File(tmpDir,"tokenStorageTest");
DataOutputStream dos=new DataOutputStream(new FileOutputStream(tmpFileName));
ts.write(dos);
dos.close();
DataInputStream dis=new DataInputStream(new FileInputStream(tmpFileName));
ts=new Credentials();
ts.readFields(dis);
dis.close();
// Both tokens must survive the round trip, keyed by their services.
Collection> list=ts.getAllTokens();
assertEquals("getAllTokens should return collection of size 2",list.size(),2);
boolean foundFirst=false;
boolean foundSecond=false;
for ( Token extends TokenIdentifier> token : list) {
if (token.getService().equals(service1)) {
foundFirst=true;
}
if (token.getService().equals(service2)) {
foundSecond=true;
}
}
assertTrue("Tokens for services service1 and service2 must be present",foundFirst && foundSecond);
// Every secret key must survive the round trip byte-for-byte.
int mapLen=m.size();
assertEquals("wrong number of keys in the Storage",mapLen,ts.numberOfSecretKeys());
for ( Text a : m.keySet()) {
byte[] kTS=ts.getSecretKey(a);
byte[] kLocal=m.get(a);
assertTrue("keys don't match for " + a,WritableComparator.compareBytes(kTS,0,kTS.length,kLocal,0,kLocal.length) == 0);
}
tmpFileName.delete();
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier
/**
 * When the superuser's proxy configuration names an allowed group but no
 * allowed client IPs, an impersonated RPC must be rejected: the fail()
 * inside the try block is only reached if the call unexpectedly succeeds,
 * while the catch merely logs the expected authorization exception.
 */
@Test public void testRealUserIPNotSpecified() throws IOException {
final Configuration conf=new Configuration();
// Allow "group1" but deliberately omit the superuser's IP whitelist.
conf.setStrings(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),"group1");
Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false).build();
refreshConf(conf);
try {
server.start();
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
UserGroupInformation realUserUgi=UserGroupInformation.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi=UserGroupInformation.createProxyUserForTesting(PROXY_USER_NAME,realUserUgi,GROUP_NAMES);
// The proxied call is expected to throw before fail() is reached.
String retVal=proxyUserUgi.doAs(new PrivilegedExceptionAction(){
@Override public String run() throws IOException {
proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,addr,conf);
String ret=proxy.aMethod();
return ret;
}
}
);
Assert.fail("The RPC must have failed " + retVal);
}
catch ( Exception e) {
e.printStackTrace();
}
finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier
/**
 * When the superuser's proxy configuration whitelists an IP (20.20.20.20)
 * that the test client will never connect from, an impersonated RPC must
 * be rejected even though the group ("group1") is allowed.
 */
@Test public void testRealUserIPAuthorizationFailure() throws IOException {
final Configuration conf=new Configuration();
// Whitelist an address that cannot match the local test client.
conf.setStrings(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(REAL_USER_SHORT_NAME),"20.20.20.20");
conf.setStrings(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),"group1");
Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false).build();
refreshConf(conf);
try {
server.start();
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
UserGroupInformation realUserUgi=UserGroupInformation.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi=UserGroupInformation.createProxyUserForTesting(PROXY_USER_NAME,realUserUgi,GROUP_NAMES);
// The proxied call is expected to throw before fail() is reached.
String retVal=proxyUserUgi.doAs(new PrivilegedExceptionAction(){
@Override public String run() throws IOException {
proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,addr,conf);
String ret=proxy.aMethod();
return ret;
}
}
);
Assert.fail("The RPC must have failed " + retVal);
}
catch ( Exception e) {
e.printStackTrace();
}
finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A proxy user carrying a delegation token must be able to complete an
 * RPC; the identity string returned by the server must then record TOKEN
 * authentication via the SIMPLE-authenticated superuser.
 */
@Test public void testProxyWithToken() throws Exception {
final Configuration conf=new Configuration(masterConf);
TestTokenSecretManager sm=new TestTokenSecretManager();
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,conf);
UserGroupInformation.setConfiguration(conf);
final Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
server.start();
final UserGroupInformation current=UserGroupInformation.createRemoteUser(REAL_USER_NAME);
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
// Issue a test token for the real user and attach it to the proxy UGI.
TestTokenIdentifier tokenId=new TestTokenIdentifier(new Text(current.getUserName()),new Text("SomeSuperUser"));
Token token=new Token(tokenId,sm);
SecurityUtil.setTokenService(token,addr);
UserGroupInformation proxyUserUgi=UserGroupInformation.createProxyUserForTesting(PROXY_USER_NAME,current,GROUP_NAMES);
proxyUserUgi.addToken(token);
refreshConf(conf);
String retVal=proxyUserUgi.doAs(new PrivilegedExceptionAction(){
@Override public String run() throws Exception {
try {
proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,addr,conf);
String ret=proxy.aMethod();
return ret;
}
catch ( Exception e) {
e.printStackTrace();
throw e;
}
finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
}
);
Assert.assertEquals(REAL_USER_NAME + " (auth:TOKEN) via SomeSuperUser (auth:SIMPLE)",retVal);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A user holding a delegation token issued via "SomeSuperUser" must be
 * able to complete an RPC; the returned identity string must show TOKEN
 * authentication via the SIMPLE-authenticated superuser.
 */
@Test public void testTokenBySuperUser() throws Exception {
TestTokenSecretManager sm=new TestTokenSecretManager();
final Configuration newConf=new Configuration(masterConf);
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,newConf);
UserGroupInformation.setConfiguration(newConf);
final Server server=new RPC.Builder(newConf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
server.start();
final UserGroupInformation current=UserGroupInformation.createUserForTesting(REAL_USER_NAME,GROUP_NAMES);
refreshConf(newConf);
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
// Issue a test token for the current user and attach it directly.
TestTokenIdentifier tokenId=new TestTokenIdentifier(new Text(current.getUserName()),new Text("SomeSuperUser"));
Token token=new Token(tokenId,sm);
SecurityUtil.setTokenService(token,addr);
current.addToken(token);
String retVal=current.doAs(new PrivilegedExceptionAction(){
@Override public String run() throws Exception {
try {
proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,addr,newConf);
String ret=proxy.aMethod();
return ret;
}
catch ( Exception e) {
e.printStackTrace();
throw e;
}
finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
}
);
String expected=REAL_USER_NAME + " (auth:TOKEN) via SomeSuperUser (auth:SIMPLE)";
Assert.assertEquals(retVal + "!=" + expected,expected,retVal);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier
/**
 * When the superuser's proxy configuration allows only "group3" — a group
 * the proxy user does not belong to — an impersonated RPC must be
 * rejected even though the client IP is whitelisted.
 */
@Test public void testRealUserGroupAuthorizationFailure() throws IOException {
final Configuration conf=new Configuration();
configureSuperUserIPAddresses(conf,REAL_USER_SHORT_NAME);
// Allow a group the proxied user is not a member of.
conf.setStrings(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),"group3");
Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false).build();
refreshConf(conf);
try {
server.start();
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
UserGroupInformation realUserUgi=UserGroupInformation.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi=UserGroupInformation.createProxyUserForTesting(PROXY_USER_NAME,realUserUgi,GROUP_NAMES);
// The proxied call is expected to throw before fail() is reached.
String retVal=proxyUserUgi.doAs(new PrivilegedExceptionAction(){
@Override public String run() throws IOException {
proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,addr,conf);
String ret=proxy.aMethod();
return ret;
}
}
);
Assert.fail("The RPC must have failed " + retVal);
}
catch ( Exception e) {
e.printStackTrace();
}
finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier
/**
 * When the superuser's proxy configuration whitelists client IPs but names
 * no allowed groups, an impersonated RPC must be rejected.
 */
@Test public void testRealUserGroupNotSpecified() throws IOException {
final Configuration conf=new Configuration();
// Whitelist the superuser's IP but deliberately omit any group setting.
configureSuperUserIPAddresses(conf,REAL_USER_SHORT_NAME);
Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false).build();
try {
server.start();
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
UserGroupInformation realUserUgi=UserGroupInformation.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi=UserGroupInformation.createProxyUserForTesting(PROXY_USER_NAME,realUserUgi,GROUP_NAMES);
// The proxied call is expected to throw before fail() is reached.
String retVal=proxyUserUgi.doAs(new PrivilegedExceptionAction(){
@Override public String run() throws IOException {
proxy=(TestProtocol)RPC.getProxy(TestProtocol.class,TestProtocol.versionID,addr,conf);
String ret=proxy.aMethod();
return ret;
}
}
);
Assert.fail("The RPC must have failed " + retVal);
}
catch ( Exception e) {
e.printStackTrace();
}
finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test method for {@link org.apache.hadoop.security.UserGroupInformation#createProxyUser(java.lang.String,org.apache.hadoop.security.UserGroupInformation)}:
 * the current user inside the proxy context must render as
 * "proxy (auth:PROXY) via real (auth:SIMPLE)".
 */
@Test public void testCreateProxyUser() throws Exception {
  final UserGroupInformation realUser=UserGroupInformation.createRemoteUser(REAL_USER_NAME);
  final UserGroupInformation proxyUser=UserGroupInformation.createProxyUser(PROXY_USER_NAME,realUser);
  // Resolve the current user from inside the proxy user's context.
  final UserGroupInformation observed=proxyUser.doAs(new PrivilegedExceptionAction(){
    @Override public UserGroupInformation run() throws IOException {
      return UserGroupInformation.getCurrentUser();
    }
  }
  );
  Assert.assertEquals(PROXY_USER_NAME + " (auth:PROXY) via " + REAL_USER_NAME+ " (auth:SIMPLE)",observed.toString());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * LdapGroupsMapping.getPassword() must read credentials from the
 * configured CredentialProvider keystore: stored bind/store passwords are
 * returned for their aliases, and an unknown alias yields the supplied
 * default ("").
 */
@Test public void testConfGetPassword() throws Exception {
File testDir=new File(System.getProperty("test.build.data","target/test-dir"));
Configuration conf=new Configuration();
final String ourUrl=JavaKeyStoreProvider.SCHEME_NAME + "://file/" + testDir+ "/test.jks";
File file=new File(testDir,"test.jks");
// Start from a clean keystore so earlier runs cannot interfere.
file.delete();
conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,ourUrl);
CredentialProvider provider=CredentialProviderFactory.getProviders(conf).get(0);
char[] bindpass={'b','i','n','d','p','a','s','s'};
char[] storepass={'s','t','o','r','e','p','a','s','s'};
// A fresh keystore must not contain either credential yet.
assertEquals(null,provider.getCredentialEntry(LdapGroupsMapping.BIND_PASSWORD_KEY));
assertEquals(null,provider.getCredentialEntry(LdapGroupsMapping.LDAP_KEYSTORE_PASSWORD_KEY));
try {
provider.createCredentialEntry(LdapGroupsMapping.BIND_PASSWORD_KEY,bindpass);
provider.createCredentialEntry(LdapGroupsMapping.LDAP_KEYSTORE_PASSWORD_KEY,storepass);
provider.flush();
}
catch ( Exception e) {
e.printStackTrace();
throw e;
}
// Verify the provider persisted both credentials...
assertArrayEquals(bindpass,provider.getCredentialEntry(LdapGroupsMapping.BIND_PASSWORD_KEY).getCredential());
assertArrayEquals(storepass,provider.getCredentialEntry(LdapGroupsMapping.LDAP_KEYSTORE_PASSWORD_KEY).getCredential());
// ...and that the mapping resolves them through the Configuration.
LdapGroupsMapping mapping=new LdapGroupsMapping();
Assert.assertEquals("bindpass",mapping.getPassword(conf,LdapGroupsMapping.BIND_PASSWORD_KEY,""));
Assert.assertEquals("storepass",mapping.getPassword(conf,LdapGroupsMapping.LDAP_KEYSTORE_PASSWORD_KEY,""));
Assert.assertEquals("",mapping.getPassword(conf,"invalid-alias",""));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end HDFS permission test: operations on a missing path throw
 * FileNotFoundException, create() results honour the default umask,
 * setPermission() round-trips several modes, and a non-superuser is
 * denied mkdir/create/open until the relevant directories are opened up.
 */
@Test public void testFilePermission() throws Exception {
final Configuration conf=new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
try {
FileSystem nnfs=FileSystem.get(conf);
assertFalse(nnfs.exists(CHILD_FILE1));
// setOwner/setPermission on a non-existent path must throw FNFE;
// assertTrue(false) is only reached if no exception was raised.
try {
nnfs.setOwner(CHILD_FILE1,"foo","bar");
assertTrue(false);
}
catch ( java.io.FileNotFoundException e) {
LOG.info("GOOD: got " + e);
}
try {
nnfs.setPermission(CHILD_FILE1,new FsPermission((short)0777));
assertTrue(false);
}
catch ( java.io.FileNotFoundException e) {
LOG.info("GOOD: got " + e);
}
// 0777 is requested but the effective mode asserted below is 0755 —
// presumably trimmed by the default umask; confirm cluster defaults.
FSDataOutputStream out=nnfs.create(CHILD_FILE1,new FsPermission((short)0777),true,1024,(short)1,1024,null);
FileStatus status=nnfs.getFileStatus(CHILD_FILE1);
assertTrue(status.getPermission().toString().equals("rwxr-xr-x"));
nnfs.delete(CHILD_FILE1,false);
// A plain create() is asserted to yield rw-r--r-- (0644).
nnfs.mkdirs(CHILD_DIR1);
out=nnfs.create(CHILD_FILE1);
status=nnfs.getFileStatus(CHILD_FILE1);
assertTrue(status.getPermission().toString().equals("rw-r--r--"));
byte data[]=new byte[FILE_LEN];
RAN.nextBytes(data);
out.write(data);
out.close();
// setPermission() round-trip to 0700, then read the data back intact.
nnfs.setPermission(CHILD_FILE1,new FsPermission("700"));
status=nnfs.getFileStatus(CHILD_FILE1);
assertTrue(status.getPermission().toString().equals("rwx------"));
byte dataIn[]=new byte[FILE_LEN];
FSDataInputStream fin=nnfs.open(CHILD_FILE1);
int bytesRead=fin.read(dataIn);
assertTrue(bytesRead == FILE_LEN);
for (int i=0; i < FILE_LEN; i++) {
assertEquals(data[i],dataIn[i]);
}
// Further setPermission() round-trips: 755, 744, then lock down to 700.
nnfs.setPermission(CHILD_FILE1,new FsPermission("755"));
status=nnfs.getFileStatus(CHILD_FILE1);
assertTrue(status.getPermission().toString().equals("rwxr-xr-x"));
nnfs.setPermission(CHILD_FILE1,new FsPermission("744"));
status=nnfs.getFileStatus(CHILD_FILE1);
assertTrue(status.getPermission().toString().equals("rwxr--r--"));
nnfs.setPermission(CHILD_FILE1,new FsPermission("700"));
// A different (non-super) user must be denied mkdir/create/open.
UserGroupInformation userGroupInfo=UserGroupInformation.createUserForTesting(USER_NAME,GROUP_NAMES);
FileSystem userfs=DFSTestUtil.getFileSystemAs(userGroupInfo,conf);
userfs.mkdirs(CHILD_DIR1);
assertTrue(!canMkdirs(userfs,CHILD_DIR2));
assertTrue(!canCreate(userfs,CHILD_FILE2));
assertTrue(!canOpen(userfs,CHILD_FILE1));
// After opening up the tree, the user can create and rename.
nnfs.setPermission(ROOT_PATH,new FsPermission((short)0755));
nnfs.setPermission(CHILD_DIR1,new FsPermission("777"));
nnfs.setPermission(new Path("/"),new FsPermission((short)0777));
final Path RENAME_PATH=new Path("/foo/bar");
userfs.mkdirs(RENAME_PATH);
assertTrue(canRename(userfs,RENAME_PATH,CHILD_DIR1));
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
* Test HADOOP_PROXY_USER for impersonation
*/
@Test public void testProxyUserFromEnvironment() throws IOException {
String proxyUser="foo.bar";
System.setProperty(UserGroupInformation.HADOOP_PROXY_USER,proxyUser);
UserGroupInformation ugi=UserGroupInformation.getLoginUser();
assertEquals(proxyUser,ugi.getUserName());
UserGroupInformation realUgi=ugi.getRealUser();
assertNotNull(realUgi);
Process pp=Runtime.getRuntime().exec("whoami");
BufferedReader br=new BufferedReader(new InputStreamReader(pp.getInputStream()));
String realUser=br.readLine().trim();
int backslashIndex=realUser.indexOf('\\');
if (backslashIndex != -1) {
realUser=realUser.substring(backslashIndex + 1);
}
assertEquals(realUser,realUgi.getUserName());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that "dfsadmin -refreshUserToGroupsMappings" invalidates the
 * cached user-to-groups mapping, and that the cache also expires on its own
 * after groupRefreshTimeoutSec.
 */
@Test public void testGroupMappingRefresh() throws Exception {
  DFSAdmin admin=new DFSAdmin(config);
  String[] args=new String[]{"-refreshUserToGroupsMappings"};
  Groups groups=Groups.getUserToGroupsMappingService(config);
  String user=UserGroupInformation.getCurrentUser().getUserName();
  System.out.println("first attempt:");
  List<String> g1=groups.getGroups(user);
  String[] str_groups=new String[g1.size()];
  g1.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  System.out.println("second attempt, should be same:");
  List<String> g2=groups.getGroups(user);
  g2.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  // Within the cache TTL the cached groups must be returned unchanged.
  for (int i=0; i < g2.size(); i++) {
    assertEquals("Should be same group ",g1.get(i),g2.get(i));
  }
  admin.run(args);
  System.out.println("third attempt(after refresh command), should be different:");
  // The test mapping service hands out fresh group names per lookup, so a
  // successful refresh must produce groups different from the cached ones.
  List<String> g3=groups.getGroups(user);
  g3.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  for (int i=0; i < g3.size(); i++) {
    assertFalse("Should be different group: " + g1.get(i) + " and "+ g3.get(i),g1.get(i).equals(g3.get(i)));
  }
  // Sleep slightly past the configured timeout (x1.1 margin) so natural
  // cache expiry also triggers a re-fetch.
  Thread.sleep(groupRefreshTimeoutSec * 1100);
  System.out.println("fourth attempt(after timeout), should be different:");
  List<String> g4=groups.getGroups(user);
  g4.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  for (int i=0; i < g4.size(); i++) {
    assertFalse("Should be different group ",g3.get(i).equals(g4.get(i)));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * getAuthenticationMethod: unset defaults to SIMPLE, valid names map to
 * their enum constants, and an unknown name raises a descriptive IAE.
 */
@Test public void testGetAuthenticationMethod(){
  Configuration conf=new Configuration();
  // Unset -> simple auth by default.
  conf.unset(HADOOP_SECURITY_AUTHENTICATION);
  assertEquals(SIMPLE,SecurityUtil.getAuthenticationMethod(conf));
  // Explicit valid values.
  conf.set(HADOOP_SECURITY_AUTHENTICATION,"simple");
  assertEquals(SIMPLE,SecurityUtil.getAuthenticationMethod(conf));
  conf.set(HADOOP_SECURITY_AUTHENTICATION,"kerberos");
  assertEquals(KERBEROS,SecurityUtil.getAuthenticationMethod(conf));
  // An unknown value must be rejected.
  conf.set(HADOOP_SECURITY_AUTHENTICATION,"kaboom");
  String caught=null;
  try {
    SecurityUtil.getAuthenticationMethod(conf);
  } catch (Exception e) {
    caught=e.toString();
  }
  String expected="java.lang.IllegalArgumentException: Invalid attribute value for " + HADOOP_SECURITY_AUTHENTICATION + " of kaboom";
  assertEquals(expected,caught);
}
APIUtilityVerifier UtilityVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Keytab login smoke test: the configured principal logs in with KERBEROS
 * auth, while a bogus principal is rejected.
 */
@Test public void testLogin() throws IOException {
  final String principal=System.getProperty("user.principal");
  final String keytab=System.getProperty("user.keytab");
  Assert.assertNotNull("User principal was not specified",principal);
  Assert.assertNotNull("User keytab was not specified",keytab);
  Configuration conf=new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
  UserGroupInformation.setConfiguration(conf);
  UserGroupInformation ugi=UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal,keytab);
  Assert.assertEquals(AuthenticationMethod.KERBEROS,ugi.getAuthenticationMethod());
  // A principal that is not in the keytab must fail to log in.
  boolean rejected=false;
  try {
    UserGroupInformation.loginUserFromKeytabAndReturnUGI("bogus@EXAMPLE.COM",keytab);
  } catch (Exception ex) {
    ex.printStackTrace();
    rejected=true;
  }
  if (!rejected) {
    Assert.fail("Login should have failed");
  }
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
/**
 * Two independent keytab logins (service and user principals) both succeed
 * with KERBEROS auth; a principal absent from the keytab is rejected.
 */
@Test public void testLogin() throws IOException {
  final String kdcDir=System.getProperty("kdc.resource.dir");
  final String nn1Keytab=kdcDir + "/keytabs/nn1.keytab";
  final String user1Keytab=kdcDir + "/keytabs/user1.keytab";
  Configuration conf=new Configuration();
  SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,conf);
  UserGroupInformation.setConfiguration(conf);
  UserGroupInformation ugiNn=UserGroupInformation.loginUserFromKeytabAndReturnUGI("nn1/localhost@EXAMPLE.COM",nn1Keytab);
  UserGroupInformation ugiDn=UserGroupInformation.loginUserFromKeytabAndReturnUGI("user1@EXAMPLE.COM",user1Keytab);
  Assert.assertEquals(AuthenticationMethod.KERBEROS,ugiNn.getAuthenticationMethod());
  Assert.assertEquals(AuthenticationMethod.KERBEROS,ugiDn.getAuthenticationMethod());
  // A principal not present in the keytab must not be able to log in.
  boolean rejected=false;
  try {
    UserGroupInformation.loginUserFromKeytabAndReturnUGI("bogus@EXAMPLE.COM",nn1Keytab);
  } catch (Exception ex) {
    ex.printStackTrace();
    rejected=true;
  }
  if (!rejected) {
    Assert.fail("Login should have failed");
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* given user name - get all the groups.
* Needs to happen before creating the test users
*/
@Test(timeout=30000) public void testGetServerSideGroups() throws IOException, InterruptedException {
Process pp=Runtime.getRuntime().exec("whoami");
BufferedReader br=new BufferedReader(new InputStreamReader(pp.getInputStream()));
String userName=br.readLine().trim();
if (Shell.WINDOWS) {
int sp=userName.lastIndexOf('\\');
if (sp != -1) {
userName=userName.substring(sp + 1);
}
userName=userName.toLowerCase();
}
pp=Runtime.getRuntime().exec(Shell.WINDOWS ? Shell.WINUTILS + " groups -F" : "id -Gn");
br=new BufferedReader(new InputStreamReader(pp.getInputStream()));
String line=br.readLine();
System.out.println(userName + ":" + line);
Set groups=new LinkedHashSet();
String[] tokens=line.split(Shell.TOKEN_SEPARATOR_REGEX);
for ( String s : tokens) {
groups.add(s);
}
final UserGroupInformation login=UserGroupInformation.getCurrentUser();
String loginUserName=login.getShortUserName();
if (Shell.WINDOWS) {
loginUserName=loginUserName.toLowerCase();
}
assertEquals(userName,loginUserName);
String[] gi=login.getGroupNames();
assertEquals(groups.size(),gi.length);
for (int i=0; i < gi.length; i++) {
assertTrue(groups.contains(gi[i]));
}
final UserGroupInformation fakeUser=UserGroupInformation.createRemoteUser("foo.bar");
fakeUser.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws IOException {
UserGroupInformation current=UserGroupInformation.getCurrentUser();
assertFalse(current.equals(login));
assertEquals(current,fakeUser);
assertEquals(0,current.getGroupNames().length);
return null;
}
}
);
}
APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * A Subject holding only a Kerberos principal yields a UGI whose user name
 * is the principal qualified with the default realm.
 */
@Test(timeout=30000) public void testGetUGIFromSubject() throws Exception {
  final Subject subject=new Subject();
  subject.getPrincipals().add(new KerberosPrincipal("guest"));
  final UserGroupInformation ugi=UserGroupInformation.getUGIFromSubject(subject);
  assertNotNull(ugi);
  assertEquals("guest@DEFAULT.REALM",ugi.getUserName());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test login method
 *
 * Outside doAs, the current user equals the login user; inside doAs, the
 * current user is the doAs subject instead.
 */
@Test(timeout=30000) public void testLogin() throws Exception {
  conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS,String.valueOf(PERCENTILES_INTERVAL));
  UserGroupInformation.setConfiguration(conf);
  UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
  assertEquals(UserGroupInformation.getCurrentUser(),UserGroupInformation.getLoginUser());
  assertTrue(ugi.getGroupNames().length >= 1);
  verifyGroupMetrics(1);
  // Inside doAs the "current" user must be the test user, not the login user.
  UserGroupInformation userGroupInfo=UserGroupInformation.createUserForTesting(USER_NAME,GROUP_NAMES);
  UserGroupInformation curUGI=userGroupInfo.doAs(new PrivilegedExceptionAction<UserGroupInformation>(){
    @Override public UserGroupInformation run() throws IOException {
      return UserGroupInformation.getCurrentUser();
    }
  });
  assertEquals(curUGI,userGroupInfo);
  assertFalse(curUGI.equals(UserGroupInformation.getLoginUser()));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A test UGI reports exactly the user name and group list it was built with.
 */
@Test(timeout=30000) public void testGettingGroups() throws Exception {
  final UserGroupInformation testUgi=UserGroupInformation.createUserForTesting(USER_NAME,GROUP_NAMES);
  assertEquals(USER_NAME,testUgi.getUserName());
  assertArrayEquals(new String[]{GROUP1_NAME,GROUP2_NAME,GROUP3_NAME},testUgi.getGroupNames());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// A proxy UGI reports PROXY as its own auth method while exposing the real
// user's method via getRealAuthenticationMethod(); equality is determined by
// the shared Subject plus the auth method.
@Test(timeout=30000) public void testUGIAuthMethodInRealUser() throws Exception {
final UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
UserGroupInformation proxyUgi=UserGroupInformation.createProxyUser("proxy",ugi);
final AuthenticationMethod am=AuthenticationMethod.KERBEROS;
// Setting the real user's method must not change the proxy's own (PROXY)
// method, only what getRealAuthenticationMethod reports.
ugi.setAuthenticationMethod(am);
Assert.assertEquals(am,ugi.getAuthenticationMethod());
Assert.assertEquals(AuthenticationMethod.PROXY,proxyUgi.getAuthenticationMethod());
Assert.assertEquals(am,UserGroupInformation.getRealAuthenticationMethod(proxyUgi));
// The same holds when observed from inside the proxy's doAs context.
proxyUgi.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws IOException {
Assert.assertEquals(AuthenticationMethod.PROXY,UserGroupInformation.getCurrentUser().getAuthenticationMethod());
Assert.assertEquals(am,UserGroupInformation.getCurrentUser().getRealUser().getAuthenticationMethod());
return null;
}
}
);
// A UGI rebuilt from the proxy's Subject equals the proxy once its auth
// method is set to PROXY as well.
UserGroupInformation proxyUgi2=new UserGroupInformation(proxyUgi.getSubject());
proxyUgi2.setAuthenticationMethod(AuthenticationMethod.PROXY);
Assert.assertEquals(proxyUgi,proxyUgi2);
// NOTE(review): proxyUgi4 gets no explicit auth method here, yet is expected
// to equal proxyUgi3 — presumably the Subject carries it; confirm.
UserGroupInformation realugi=UserGroupInformation.getCurrentUser();
UserGroupInformation proxyUgi3=UserGroupInformation.createProxyUser("proxyAnother",realugi);
UserGroupInformation proxyUgi4=new UserGroupInformation(proxyUgi3.getSubject());
Assert.assertEquals(proxyUgi3,proxyUgi4);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * UGI equality is Subject-based: two proxy UGIs sharing one Subject are
 * equal, while a plain remote user with the same name is not.
 */
@Test(timeout=30000) public void testEqualsWithRealUser() throws Exception {
  final UserGroupInformation realUser=UserGroupInformation.createUserForTesting("RealUser",GROUP_NAMES);
  final UserGroupInformation proxy=UserGroupInformation.createProxyUser(USER_NAME,realUser);
  final UserGroupInformation sameSubjectProxy=new UserGroupInformation(proxy.getSubject());
  final UserGroupInformation remote=UserGroupInformation.createRemoteUser(USER_NAME);
  assertEquals(proxy,sameSubjectProxy);
  assertFalse(remote.equals(proxy));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A plain user reports its own method for both getters; wrapping it in a
 * proxy changes the effective method to PROXY but not the real one.
 */
@Test(timeout=30000) public void testGetRealAuthenticationMethod(){
  UserGroupInformation user=UserGroupInformation.createRemoteUser("user1");
  user.setAuthenticationMethod(AuthenticationMethod.SIMPLE);
  assertEquals(AuthenticationMethod.SIMPLE,user.getAuthenticationMethod());
  assertEquals(AuthenticationMethod.SIMPLE,user.getRealAuthenticationMethod());
  UserGroupInformation proxy=UserGroupInformation.createProxyUser("user2",user);
  assertEquals(AuthenticationMethod.PROXY,proxy.getAuthenticationMethod());
  assertEquals(AuthenticationMethod.SIMPLE,proxy.getRealAuthenticationMethod());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * UGI equality/hashCode contract: reflexive; two distinct UGIs for the same
 * user/groups are NOT equal; a UGI wrapping the same Subject IS equal.
 */
@Test(timeout=30000) public void testEquals() throws Exception {
  final UserGroupInformation ugi=UserGroupInformation.createUserForTesting(USER_NAME,GROUP_NAMES);
  // Reflexive.
  assertEquals(ugi,ugi);
  // Same user/groups but a different Subject: unequal, different hash.
  final UserGroupInformation other=UserGroupInformation.createUserForTesting(USER_NAME,GROUP_NAMES);
  assertFalse(ugi.equals(other));
  assertFalse(ugi.hashCode() == other.hashCode());
  // Same Subject: equal, with equal hash codes.
  final UserGroupInformation sameSubject=new UserGroupInformation(ugi.getSubject());
  assertEquals(ugi,sameSubject);
  assertEquals(ugi.hashCode(),sameSubject.hashCode());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Both configured credential-provider schemes resolve, in order, to their
 * provider classes, and each provider round-trips its URI via toString().
 */
@Test public void testFactory() throws Exception {
  final String userUri=UserProvider.SCHEME_NAME + ":///";
  final String jksUri=JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir + "/test.jks";
  Configuration conf=new Configuration();
  conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,userUri + "," + jksUri);
  List providers=CredentialProviderFactory.getProviders(conf);
  assertEquals(2,providers.size());
  assertEquals(UserProvider.class,providers.get(0).getClass());
  assertEquals(JavaKeyStoreProvider.class,providers.get(1).getClass());
  assertEquals(userUri,providers.get(0).toString());
  assertEquals(jksUri,providers.get(1).toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * An unauthenticated request must be challenged: HTTP 401 plus a
 * WWW-Authenticate header.
 */
@Test(timeout=60000) public void testNotAuthenticated() throws Exception {
  AuthenticatorTestCase auth=new AuthenticatorTestCase();
  AuthenticatorTestCase.setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration());
  auth.start();
  try {
    URL url=new URL(auth.getBaseURL());
    HttpURLConnection conn=(HttpURLConnection)url.openConnection();
    conn.connect();
    Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode());
    // assertNotNull reports the failure more clearly than assertTrue(x != null).
    Assert.assertNotNull(conn.getHeaderField(KerberosAuthenticator.WWW_AUTHENTICATE));
  } finally {
    auth.stop();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * With anonymous access disabled, an unauthenticated request is rejected
 * with 403 and an explanatory message.
 */
@Test public void testAnonymousDisallowed() throws Exception {
  AuthenticatorTestCase auth=new AuthenticatorTestCase();
  AuthenticatorTestCase.setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(false));
  auth.start();
  try {
    HttpURLConnection conn=(HttpURLConnection)new URL(auth.getBaseURL()).openConnection();
    conn.connect();
    Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,conn.getResponseCode());
    Assert.assertEquals("Anonymous requests are disallowed",conn.getResponseMessage());
  } finally {
    auth.stop();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * With anonymous access enabled, an unauthenticated request succeeds (200).
 */
@Test public void testAnonymousAllowed() throws Exception {
  AuthenticatorTestCase auth=new AuthenticatorTestCase();
  AuthenticatorTestCase.setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(true));
  auth.start();
  try {
    HttpURLConnection conn=(HttpURLConnection)new URL(auth.getBaseURL()).openConnection();
    conn.connect();
    Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
  } finally {
    auth.stop();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// When the non-browser user-agent list is overridden ("foo, bar"), an agent
// not on that list ("blah") must be treated as a browser and routed to the
// alternate authentication path (stubbed to user "A" / name "B").
@Test(timeout=60000) public void testNonDefaultNonBrowserUserAgentAsBrowser() throws Exception {
HttpServletRequest request=Mockito.mock(HttpServletRequest.class);
HttpServletResponse response=Mockito.mock(HttpServletResponse.class);
// Tear down any handler left over from a previous test before re-init.
if (handler != null) {
handler.destroy();
handler=null;
}
handler=getNewAuthenticationHandler();
Properties props=getDefaultProperties();
props.setProperty("alt-kerberos.non-browser.user-agents","foo, bar");
try {
handler.init(props);
}
catch ( Exception ex) {
// Null out the field so teardown does not destroy a half-initialized
// handler; the failure is still surfaced to JUnit.
handler=null;
throw ex;
}
Mockito.when(request.getHeader("User-Agent")).thenReturn("blah");
AuthenticationToken token=handler.authenticate(request,response);
Assert.assertEquals("A",token.getUserName());
Assert.assertEquals("B",token.getName());
Assert.assertEquals(getExpectedType(),token.getType());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A browser User-Agent is routed to the alternate authentication path,
 * which the test handler stubs to return user "A" / name "B".
 */
@Test(timeout=60000) public void testAlternateAuthenticationAsBrowser() throws Exception {
  final HttpServletRequest req=Mockito.mock(HttpServletRequest.class);
  final HttpServletResponse resp=Mockito.mock(HttpServletResponse.class);
  Mockito.when(req.getHeader("User-Agent")).thenReturn("Some Browser");
  final AuthenticationToken token=handler.authenticate(req,resp);
  Assert.assertEquals("A",token.getUserName());
  Assert.assertEquals("B",token.getName());
  Assert.assertEquals(getExpectedType(),token.getType());
}
APIUtilityVerifier UtilityVerifier NullVerifier EqualityVerifier HybridVerifier
// A failed authentication must: send 403 with "AUTH FAILED", NOT issue a
// WWW-Authenticate challenge, never invoke the downstream filter chain, and
// clear the auth cookie (set it to the empty string).
@Test public void testDoFilterAuthenticationFailure() throws Exception {
AuthenticationFilter filter=new AuthenticationFilter();
try {
// Filter config: dummy handler, no externally provided signature secret.
FilterConfig config=Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter("management.operation.return")).thenReturn("true");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,"management.operation.return")).elements());
ServletContext context=Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
Mockito.when(config.getServletContext()).thenReturn(context);
filter.init(config);
// Request with no cookies and a header that makes the dummy handler fail.
HttpServletRequest request=Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getRequestURL()).thenReturn(new StringBuffer("http://foo:8080/bar"));
Mockito.when(request.getCookies()).thenReturn(new Cookie[]{});
Mockito.when(request.getHeader("WWW-Authenticate")).thenReturn("dummyauth");
HttpServletResponse response=Mockito.mock(HttpServletResponse.class);
FilterChain chain=Mockito.mock(FilterChain.class);
// Capture every Set-Cookie header the filter emits.
final HashMap cookieMap=new HashMap();
Mockito.doAnswer(new Answer(){
@Override public Object answer( InvocationOnMock invocation) throws Throwable {
Object[] args=invocation.getArguments();
parseCookieMap((String)args[1],cookieMap);
return null;
}
}
).when(response).addHeader(Mockito.eq("Set-Cookie"),Mockito.anyString());
// The downstream chain must never run on an auth failure.
Mockito.doAnswer(new Answer(){
@Override public Object answer( InvocationOnMock invocation) throws Throwable {
Assert.fail("shouldn't get here");
return null;
}
}
).when(chain).doFilter(Mockito.anyObject(),Mockito.anyObject());
filter.doFilter(request,response,chain);
Mockito.verify(response).sendError(HttpServletResponse.SC_FORBIDDEN,"AUTH FAILED");
Mockito.verify(response,Mockito.never()).setHeader(Mockito.eq("WWW-Authenticate"),Mockito.anyString());
// The auth cookie must be present but emptied out.
String value=cookieMap.get(AuthenticatedURL.AUTH_COOKIE);
Assert.assertNotNull("cookie missing",value);
Assert.assertEquals("",value);
}
finally {
filter.destroy();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A token round-trips through toString()/parse() with its fields and expiry
 * intact, and isExpired() flips once the expiry instant passes.
 */
@Test public void testToStringAndParse() throws Exception {
  long expires=System.currentTimeMillis() + 50;
  AuthenticationToken token=new AuthenticationToken("u","p","t");
  token.setExpires(expires);
  String str=token.toString();
  token=AuthenticationToken.parse(str);
  Assert.assertEquals("p",token.getName());
  Assert.assertEquals("t",token.getType());
  Assert.assertEquals(expires,token.getExpires());
  Assert.assertFalse(token.isExpired());
  // Wait until the wall clock has definitely passed the expiry instant
  // rather than sleeping a fixed 70ms, so the test is not sensitive to
  // scheduler jitter.
  while (System.currentTimeMillis() <= expires) {
    Thread.sleep(10);
  }
  Assert.assertTrue(token.isExpired());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * With the wildcard principal "*", the handler must discover its login
 * principals from the keytab, keeping only HTTP/* service principals.
 */
@Test(timeout=60000) public void testDynamicPrincipalDiscovery() throws Exception {
  // Provision a mix of HTTP and non-HTTP principals in the keytab.
  String[] keytabUsers=new String[]{"HTTP/host1","HTTP/host2","HTTP2/host1","XHTTP/host"};
  String keytab=KerberosTestUtils.getKeytabFile();
  getKdc().createPrincipal(new File(keytab),keytabUsers);
  // Re-initialize the handler with wildcard principal discovery.
  handler.destroy();
  Properties props=new Properties();
  props.setProperty(KerberosAuthenticationHandler.KEYTAB,keytab);
  props.setProperty(KerberosAuthenticationHandler.PRINCIPAL,"*");
  handler=getNewAuthenticationHandler();
  handler.init(props);
  Assert.assertEquals(KerberosTestUtils.getKeytabFile(),handler.getKeytab());
  Set loginPrincipals=handler.getPrincipals();
  // Only HTTP/* entries qualify as server login principals.
  for (String user : keytabUsers) {
    Principal principal=new KerberosPrincipal(user + "@" + KerberosTestUtils.getRealm());
    boolean expected=user.startsWith("HTTP/");
    Assert.assertEquals("checking for " + user,expected,loginPrincipals.contains(principal));
  }
}
APIUtilityVerifier BranchVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Filtering a keytab with an HTTP/* pattern returns exactly the matching
 * subset of the principals written to it.
 */
@Test public void testGetPrincipalNamesFromKeytabWithPattern() throws IOException {
  createKeyTab(testKeytab,testPrincipals);
  Pattern httpPattern=Pattern.compile("HTTP/.*");
  String[] httpPrincipals=KerberosUtil.getPrincipalNames(testKeytab,httpPattern);
  Assert.assertNotNull("principals cannot be null",httpPrincipals);
  List matched=Arrays.asList(httpPrincipals);
  int matchCount=0;
  for (String candidate : testPrincipals) {
    if (httpPattern.matcher(candidate).matches()) {
      matchCount++;
      Assert.assertTrue("missing principal " + candidate,matched.contains(candidate));
    }
  }
  // No extras: the result holds the matches and nothing else.
  Assert.assertEquals(matchCount,httpPrincipals.length);
}
APIUtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Without a filter pattern, getPrincipalNames returns every principal that
 * was written to the keytab, and nothing else.
 */
@Test public void testGetPrincipalNamesFromKeytab() throws IOException {
  createKeyTab(testKeytab,testPrincipals);
  String[] principals=KerberosUtil.getPrincipalNames(testKeytab);
  Assert.assertNotNull("principals cannot be null",principals);
  List found=Arrays.asList(principals);
  int count=0;
  for (String principal : testPrincipals) {
    count++;
    Assert.assertTrue("missing principal " + principal,found.contains(principal));
  }
  Assert.assertEquals(count,principals.length);
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * A seeded RandomSignerSecretProvider must produce a predictable sequence
 * of secrets, keeping exactly the current and previous secret across rolls.
 */
@Test public void testGetAndRollSecrets() throws Exception {
  long rolloverFrequency=15 * 1000;
  long seed=System.currentTimeMillis();
  Random rand=new Random(seed);
  // The provider is seeded identically, so it must generate exactly this
  // sequence of secrets, one per rollover.
  byte[] secret1=Long.toString(rand.nextLong()).getBytes();
  byte[] secret2=Long.toString(rand.nextLong()).getBytes();
  byte[] secret3=Long.toString(rand.nextLong()).getBytes();
  RandomSignerSecretProvider secretProvider=new RandomSignerSecretProvider(seed);
  try {
    secretProvider.init(null,rolloverFrequency);
    // Right after init there is no previous secret yet.
    assertCurrentAndPrevious(secretProvider,secret1,null);
    Thread.sleep(rolloverFrequency + 2000);
    assertCurrentAndPrevious(secretProvider,secret2,secret1);
    Thread.sleep(rolloverFrequency + 2000);
    assertCurrentAndPrevious(secretProvider,secret3,secret2);
    // The original trailing sleep verified nothing and only slowed the
    // test down, so it has been removed.
  } finally {
    secretProvider.destroy();
  }
}
/**
 * Asserts the provider currently holds {@code current}, and {@code previous}
 * as its only other secret ({@code null} before the first rollover).
 */
private static void assertCurrentAndPrevious(RandomSignerSecretProvider provider,byte[] current,byte[] previous) {
  Assert.assertArrayEquals(current,provider.getCurrentSecret());
  byte[][] allSecrets=provider.getAllSecrets();
  Assert.assertEquals(2,allSecrets.length);
  Assert.assertArrayEquals(current,allSecrets[0]);
  if (previous == null) {
    Assert.assertNull(allSecrets[1]);
  } else {
    Assert.assertArrayEquals(previous,allSecrets[1]);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A string-backed provider exposes exactly one secret — the configured
 * string's bytes — both as the current secret and as the full secret list.
 */
@Test public void testGetSecrets() throws Exception {
  final String secretStr="secret";
  final byte[] expected=secretStr.getBytes();
  StringSignerSecretProvider secretProvider=new StringSignerSecretProvider(secretStr);
  secretProvider.init(null,-1);
  Assert.assertArrayEquals(expected,secretProvider.getCurrentSecret());
  byte[][] allSecrets=secretProvider.getAllSecrets();
  Assert.assertEquals(1,allSecrets.length);
  Assert.assertArrayEquals(expected,allSecrets[0]);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Exercises AccessControlList string parsing ("users groups", each side
 * comma-separated, whitespace tolerated). All assertEquals calls use
 * JUnit's (expected, actual) order — the original had them reversed, which
 * produces misleading failure messages.
 */
@Test public void testAccessControlList() throws Exception {
  AccessControlList acl;
  Collection<String> users;
  Collection<String> groups;
  // One user, one group.
  acl=new AccessControlList("drwho tardis");
  users=acl.getUsers();
  assertEquals(1,users.size());
  assertEquals("drwho",users.iterator().next());
  groups=acl.getGroups();
  assertEquals(1,groups.size());
  assertEquals("tardis",groups.iterator().next());
  // User only.
  acl=new AccessControlList("drwho");
  users=acl.getUsers();
  assertEquals(1,users.size());
  assertEquals("drwho",users.iterator().next());
  groups=acl.getGroups();
  assertEquals(0,groups.size());
  // Trailing whitespace after the user list is ignored.
  acl=new AccessControlList("drwho ");
  users=acl.getUsers();
  assertEquals(1,users.size());
  assertEquals("drwho",users.iterator().next());
  groups=acl.getGroups();
  assertEquals(0,groups.size());
  // Leading space means an empty user list; group only.
  acl=new AccessControlList(" tardis");
  users=acl.getUsers();
  assertEquals(0,users.size());
  groups=acl.getGroups();
  assertEquals(1,groups.size());
  assertEquals("tardis",groups.iterator().next());
  // Comma-separated entries on both sides, with a space after one comma.
  Iterator<String> iter;
  acl=new AccessControlList("drwho,joe tardis, users");
  users=acl.getUsers();
  assertEquals(2,users.size());
  iter=users.iterator();
  assertEquals("drwho",iter.next());
  assertEquals("joe",iter.next());
  groups=acl.getGroups();
  assertEquals(2,groups.size());
  iter=groups.iterator();
  assertEquals("tardis",iter.next());
  assertEquals("users",iter.next());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test addUser/Group and removeUser/Group api.
 *
 * All assertEquals calls use JUnit's (expected, actual) order — the
 * original had many reversed, producing misleading failure messages.
 */
@Test public void testAddRemoveAPI(){
  AccessControlList acl;
  Collection<String> users;
  Collection<String> groups;
  // An ACL built from a single space is empty.
  acl=new AccessControlList(" ");
  assertEquals(0,acl.getUsers().size());
  assertEquals(0,acl.getGroups().size());
  assertEquals(" ",acl.getAclString());
  // Adding a user, then a group, is reflected in the ACL string.
  acl.addUser("drwho");
  users=acl.getUsers();
  assertEquals(1,users.size());
  assertEquals("drwho",users.iterator().next());
  assertEquals("drwho ",acl.getAclString());
  acl.addGroup("tardis");
  groups=acl.getGroups();
  assertEquals(1,groups.size());
  assertEquals("tardis",groups.iterator().next());
  assertEquals("drwho tardis",acl.getAclString());
  // Further additions keep insertion order.
  acl.addUser("joe");
  acl.addGroup("users");
  users=acl.getUsers();
  assertEquals(2,users.size());
  Iterator<String> iter=users.iterator();
  assertEquals("drwho",iter.next());
  assertEquals("joe",iter.next());
  groups=acl.getGroups();
  assertEquals(2,groups.size());
  iter=groups.iterator();
  assertEquals("tardis",iter.next());
  assertEquals("users",iter.next());
  assertEquals("drwho,joe tardis,users",acl.getAclString());
  // Removal shrinks the sets and the ACL string.
  acl.removeUser("joe");
  acl.removeGroup("users");
  users=acl.getUsers();
  assertEquals(1,users.size());
  assertFalse(users.contains("joe"));
  groups=acl.getGroups();
  assertEquals(1,groups.size());
  assertFalse(groups.contains("users"));
  assertEquals("drwho tardis",acl.getAclString());
  acl.removeGroup("tardis");
  groups=acl.getGroups();
  assertEquals(0,groups.size());
  assertFalse(groups.contains("tardis"));
  assertEquals("drwho ",acl.getAclString());
  acl.removeUser("drwho");
  // Re-fetch rather than relying on the previously returned collection
  // being a live view of the ACL's internal state.
  users=acl.getUsers();
  assertEquals(0,users.size());
  assertFalse(users.contains("drwho"));
  assertEquals(0,acl.getGroups().size());
  assertEquals(0,acl.getUsers().size());
  assertEquals(" ",acl.getAclString());
}
APIUtilityVerifier EqualityVerifier
/**
 * Proxy-user config values containing stray whitespace must be trimmed
 * during parsing, so the full configured group list is still honored.
 */
@Test public void testWithProxyGroupsAndUsersWithSpaces() throws Exception {
  Configuration conf=new Configuration();
  conf.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserUserConfKey(REAL_USER_NAME),StringUtils.join(",",Arrays.asList(PROXY_USER_NAME + " ",AUTHORIZED_PROXY_USER_NAME,"ONEMORE")));
  conf.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(REAL_USER_NAME),StringUtils.join(",",Arrays.asList(GROUP_NAMES)));
  conf.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(REAL_USER_NAME),PROXY_IP);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  final String groupKey=DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(REAL_USER_NAME);
  Collection groupsToBeProxied=ProxyUsers.getDefaultImpersonationProvider().getProxyGroups().get(groupKey);
  assertEquals(GROUP_NAMES.length,groupsToBeProxied.size());
}
APIUtilityVerifier EqualityVerifier
/**
 * Duplicate proxy host entries in the configuration are de-duplicated:
 * listing the same IP twice yields a single proxy host.
 */
@Test public void testWithDuplicateProxyHosts() throws Exception {
  Configuration conf=new Configuration();
  conf.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(REAL_USER_NAME),StringUtils.join(",",Arrays.asList(GROUP_NAMES)));
  conf.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(REAL_USER_NAME),StringUtils.join(",",Arrays.asList(PROXY_IP,PROXY_IP)));
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  final String ipKey=DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(REAL_USER_NAME);
  Collection hosts=ProxyUsers.getDefaultImpersonationProvider().getProxyHosts().get(ipKey);
  assertEquals(1,hosts.size());
}
APIUtilityVerifier EqualityVerifier
/**
 * Duplicate proxy group entries are de-duplicated: configuring the group
 * list twice over yields a single group entry per distinct group set.
 */
@Test public void testWithDuplicateProxyGroups() throws Exception {
  Configuration conf=new Configuration();
  conf.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(REAL_USER_NAME),StringUtils.join(",",Arrays.asList(GROUP_NAMES,GROUP_NAMES)));
  conf.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(REAL_USER_NAME),PROXY_IP);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  final String groupKey=DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(REAL_USER_NAME);
  Collection groupsToBeProxied=ProxyUsers.getDefaultImpersonationProvider().getProxyGroups().get(groupKey);
  assertEquals(1,groupsToBeProxied.size());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Mints tokens from many threads concurrently with master-key rolls (key
 * update interval is only 2s) and verifies every token is cached, keyed
 * correctly, and verifiable afterwards.
 */
@Test public void testParallelDelegationTokenCreation() throws Exception {
  final TestDelegationTokenSecretManager dtSecretManager=new TestDelegationTokenSecretManager(2000,24 * 60 * 60* 1000,7 * 24 * 60* 60* 1000,2000);
  try {
    dtSecretManager.startThreads();
    int numThreads=100;
    final int numTokensPerThread=100;
    // Each issuer mints tokens at a steady pace so creation overlaps with
    // key rolls. (Local class renamed to UpperCamelCase per convention.)
    class TokenIssuerThread implements Runnable {
      @Override public void run(){
        for (int i=0; i < numTokensPerThread; i++) {
          generateDelegationToken(dtSecretManager,"auser","arenewer");
          try {
            Thread.sleep(250);
          } catch (Exception e) {
            // Best-effort pacing only; an interrupted sleep is harmless here.
          }
        }
      }
    }
    Thread[] issuers=new Thread[numThreads];
    for (int i=0; i < numThreads; i++) {
      issuers[i]=new Daemon(new TokenIssuerThread());
      issuers[i].start();
    }
    for (int i=0; i < numThreads; i++) {
      issuers[i].join();
    }
    // Every concurrently minted token must be present and consistent.
    Map<TestDelegationTokenIdentifier,DelegationTokenInformation> tokenCache=dtSecretManager.getAllTokens();
    Assert.assertEquals(numTokensPerThread * numThreads,tokenCache.size());
    Iterator<TestDelegationTokenIdentifier> iter=tokenCache.keySet().iterator();
    while (iter.hasNext()) {
      TestDelegationTokenIdentifier id=iter.next();
      DelegationTokenInformation info=tokenCache.get(id);
      Assert.assertNotNull(info);
      DelegationKey key=dtSecretManager.getKey(id);
      Assert.assertNotNull(key);
      // Recomputing the password from the stored key must match the stored
      // password, and the token must pass verification.
      byte[] storedPassword=dtSecretManager.retrievePassword(id);
      byte[] password=dtSecretManager.createPassword(id,key);
      Assert.assertArrayEquals(password,storedPassword);
      dtSecretManager.verifyToken(id,password);
    }
  } finally {
    dtSecretManager.stopThreads();
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// End-to-end token lifecycle: issue (store hook fires), renew (only by the
// designated renewer), expire after the renew interval, renew again, then
// exceed max lifetime so further renewal fails. Manager is configured with a
// 3s renew interval and 1s-scale timings, so the sleeps below are load-bearing.
@Test public void testDelegationTokenSecretManager() throws Exception {
final TestDelegationTokenSecretManager dtSecretManager=new TestDelegationTokenSecretManager(24 * 60 * 60* 1000,3 * 1000,1 * 1000,3600000);
try {
dtSecretManager.startThreads();
final Token token=generateDelegationToken(dtSecretManager,"SomeUser","JobTracker");
Assert.assertTrue(dtSecretManager.isStoreNewTokenCalled);
// Only the designated renewer ("JobTracker") may renew.
shouldThrow(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
dtSecretManager.renewToken(token,"FakeRenewer");
return null;
}
}
,AccessControlException.class);
long time=dtSecretManager.renewToken(token,"JobTracker");
Assert.assertTrue(dtSecretManager.isUpdateStoredTokenCalled);
assertTrue("renew time is in future",time > Time.now());
TestDelegationTokenIdentifier identifier=new TestDelegationTokenIdentifier();
byte[] tokenId=token.getIdentifier();
identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
LOG.info("Sleep to expire the token");
Thread.sleep(2000);
try {
dtSecretManager.retrievePassword(identifier);
Assert.fail("Token should have expired");
}
catch ( InvalidToken e) {
// Expected: the token expired during the sleep above.
}
dtSecretManager.renewToken(token,"JobTracker");
LOG.info("Sleep beyond the max lifetime");
Thread.sleep(2000);
// Past max lifetime even the legitimate renewer must be refused.
shouldThrow(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
dtSecretManager.renewToken(token,"JobTracker");
return null;
}
}
,InvalidToken.class);
}
finally {
dtSecretManager.stopThreads();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test @SuppressWarnings("unchecked") public void testDelegationTokenSelector() throws Exception {
TestDelegationTokenSecretManager dtSecretManager=new TestDelegationTokenSecretManager(24 * 60 * 60* 1000,10 * 1000,1 * 1000,3600000);
try {
dtSecretManager.startThreads();
AbstractDelegationTokenSelector ds=new AbstractDelegationTokenSelector(KIND);
Token token1=generateDelegationToken(dtSecretManager,"SomeUser1","JobTracker");
token1.setService(new Text("MY-SERVICE1"));
Token token2=generateDelegationToken(dtSecretManager,"SomeUser2","JobTracker");
token2.setService(new Text("MY-SERVICE2"));
List> tokens=new ArrayList>();
tokens.add(token1);
tokens.add(token2);
Token t=ds.selectToken(new Text("MY-SERVICE1"),tokens);
Assert.assertEquals(t,token1);
}
finally {
dtSecretManager.stopThreads();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Rolling the master key must persist a new key and keep previously issued
 * tokens verifiable: the password derived for an old token must be unchanged.
 */
@Test(timeout=10000) public void testRollMasterKey() throws Exception {
  TestDelegationTokenSecretManager dtSecretManager =
      new TestDelegationTokenSecretManager(800, 800, 1 * 1000, 3600000);
  try {
    dtSecretManager.startThreads();
    Token token = generateDelegationToken(dtSecretManager, "SomeUser", "JobTracker");
    byte[] oldPasswd = token.getPassword();
    int prevNumKeys = dtSecretManager.getAllKeys().length;
    dtSecretManager.rollMasterKey();
    Assert.assertTrue(dtSecretManager.isStoreNewMasterKeyCalled);
    int currNumKeys = dtSecretManager.getAllKeys().length;
    // FIX: was assertEquals(cond, true); assertTrue states the intent directly.
    Assert.assertTrue("rolling the master key should add at least one key",
        (currNumKeys - prevNumKeys) >= 1);
    ByteArrayInputStream bi = new ByteArrayInputStream(token.getIdentifier());
    TestDelegationTokenIdentifier identifier = dtSecretManager.createIdentifier();
    identifier.readFields(new DataInputStream(bi));
    byte[] newPasswd = dtSecretManager.retrievePassword(identifier);
    // FIX: assertEquals on byte[] binds to assertEquals(Object, Object) and
    // compares references, not contents; compare element-wise instead.
    Assert.assertArrayEquals(oldPasswd, newPasswd);
    // Wait until the expired old key has been purged from storage.
    while (!dtSecretManager.isRemoveStoredMasterKeyCalled) {
      Thread.sleep(200);
    }
  } finally {
    dtSecretManager.stopThreads();
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Drives the delegation-token HTTP endpoints with raw HttpURLConnection calls:
 * GET/RENEW/CANCEL of a token, with and without authentication, including the
 * wrong-renewer (403) and double-cancel (404) cases. The call order is
 * significant — later requests reuse or invalidate earlier tokens.
 */
@Test public void testRawHttpCalls() throws Exception {
final Server jetty=createJettyServer();
Context context=new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(AFilter.class),"/*",0);
context.addServlet(new ServletHolder(PingServlet.class),"/bar");
try {
jetty.start();
// AFilter presumably treats ?authenticated=<user> as an authenticated request
// — the 401/200 pairs below depend on that; confirm against AFilter.
URL nonAuthURL=new URL(getJettyURL() + "/foo/bar");
URL authURL=new URL(getJettyURL() + "/foo/bar?authenticated=foo");
// Unauthenticated plain request is rejected; authenticated one passes.
HttpURLConnection conn=(HttpURLConnection)nonAuthURL.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode());
conn=(HttpURLConnection)authURL.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
// GETDELEGATIONTOKEN requires authentication.
URL url=new URL(nonAuthURL.toExternalForm() + "?op=GETDELEGATIONTOKEN");
conn=(HttpURLConnection)url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode());
url=new URL(authURL.toExternalForm() + "&op=GETDELEGATIONTOKEN&renewer=foo");
conn=(HttpURLConnection)url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
// Extract the token's URL-safe string from the JSON response body.
ObjectMapper mapper=new ObjectMapper();
Map map=mapper.readValue(conn.getInputStream(),Map.class);
String dt=(String)((Map)map.get("Token")).get("urlString");
Assert.assertNotNull(dt);
// A valid delegation token substitutes for authentication on both URLs.
url=new URL(nonAuthURL.toExternalForm() + "?delegation=" + dt);
conn=(HttpURLConnection)url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
url=new URL(authURL.toExternalForm() + "&delegation=" + dt);
conn=(HttpURLConnection)url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
// RENEWDELEGATIONTOKEN (PUT) requires authentication...
url=new URL(nonAuthURL.toExternalForm() + "?op=RENEWDELEGATIONTOKEN&token=" + dt);
conn=(HttpURLConnection)url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode());
url=new URL(authURL.toExternalForm() + "&op=RENEWDELEGATIONTOKEN&token=" + dt);
conn=(HttpURLConnection)url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
// ...and only by the designated renewer ("foo"); user "bar" gets 403.
url=new URL(getJettyURL() + "/foo/bar?authenticated=bar&op=RENEWDELEGATIONTOKEN&token=" + dt);
conn=(HttpURLConnection)url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,conn.getResponseCode());
// CANCELDELEGATIONTOKEN works without authentication; cancelling the same
// token a second time yields 404.
url=new URL(nonAuthURL.toExternalForm() + "?op=CANCELDELEGATIONTOKEN&token=" + dt);
conn=(HttpURLConnection)url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
url=new URL(nonAuthURL.toExternalForm() + "?op=CANCELDELEGATIONTOKEN&token=" + dt);
conn=(HttpURLConnection)url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_NOT_FOUND,conn.getResponseCode());
// Fetch a fresh token and cancel it through the authenticated URL.
url=new URL(authURL.toExternalForm() + "&op=GETDELEGATIONTOKEN&renewer=foo");
conn=(HttpURLConnection)url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
mapper=new ObjectMapper();
map=mapper.readValue(conn.getInputStream(),Map.class);
dt=(String)((Map)map.get("Token")).get("urlString");
Assert.assertNotNull(dt);
url=new URL(authURL.toExternalForm() + "&op=CANCELDELEGATIONTOKEN&token=" + dt);
conn=(HttpURLConnection)url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
}
finally {
jetty.stop();
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * When no Kerberos credentials are available, DelegationTokenAuthenticatedURL
 * must fall back to pseudo (simple) authentication and still be able to fetch
 * a delegation token of the filter's configured kind.
 */
@Test public void testFallbackToPseudoDelegationTokenAuthenticator() throws Exception {
final Server jetty=createJettyServer();
Context context=new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(PseudoDTAFilter.class),"/*",0);
context.addServlet(new ServletHolder(UserServlet.class),"/bar");
try {
jetty.start();
final URL url=new URL(getJettyURL() + "/foo/bar");
// Run the client side as FOO_USER so pseudo auth picks up that identity.
UserGroupInformation ugi=UserGroupInformation.createRemoteUser(FOO_USER);
ugi.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token=new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl=new DelegationTokenAuthenticatedURL();
HttpURLConnection conn=aUrl.openConnection(url,token);
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
// The servlet's single response line is expected to be the user name.
List ret=IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1,ret.size());
Assert.assertEquals(FOO_USER,ret.get(0));
// Fetching a delegation token must also work over the pseudo fallback.
aUrl.getDelegationToken(url,token,FOO_USER);
Assert.assertNotNull(token.getDelegationToken());
// "token-kind" presumably matches the kind configured in PseudoDTAFilter —
// confirm against that filter's setup.
Assert.assertEquals(new Text("token-kind"),token.getDelegationToken().getKind());
return null;
}
}
);
}
finally {
jetty.stop();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies the UGI the servlet observes: a direct request runs as the remote
 * user, while a doAs/proxy request exposes both the real and the effective
 * user in the servlet's response line.
 */
@Test public void testHttpUGI() throws Exception {
final Server jetty=createJettyServer();
Context context=new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(PseudoDTAFilter.class),"/*",0);
context.addServlet(new ServletHolder(UGIServlet.class),"/bar");
try {
jetty.start();
final URL url=new URL(getJettyURL() + "/foo/bar");
UserGroupInformation ugi=UserGroupInformation.createRemoteUser(FOO_USER);
ugi.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token=new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl=new DelegationTokenAuthenticatedURL();
// Plain request: remote user and UGI are both FOO_USER.
HttpURLConnection conn=aUrl.openConnection(url,token);
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
List ret=IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1,ret.size());
Assert.assertEquals("remoteuser=" + FOO_USER + ":ugi="+ FOO_USER,ret.get(0));
// Proxy request as OK_USER: real user stays FOO_USER, effective user is OK_USER.
conn=aUrl.openConnection(url,token,OK_USER);
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
ret=IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1,ret.size());
Assert.assertEquals("realugi=" + FOO_USER + ":remoteuser="+ OK_USER+ ":ugi="+ OK_USER,ret.get(0));
return null;
}
}
);
}
finally {
jetty.stop();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Proxy-user handling: an authenticated FOO_USER may act as OK_USER but not as
 * FAIL_USER. The final asserts check that once a delegation token is present in
 * the current UGI, a request opened with doAs=OK_USER is answered as the token
 * owner (FOO_USER).
 */
@Test public void testProxyUser() throws Exception {
  final Server jetty = createJettyServer();
  Context context = new Context();
  context.setContextPath("/foo");
  jetty.setHandler(context);
  context.addFilter(new FilterHolder(PseudoDTAFilter.class), "/*", 0);
  context.addServlet(new ServletHolder(UserServlet.class), "/bar");
  try {
    jetty.start();
    final URL url = new URL(getJettyURL() + "/foo/bar");
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
    ugi.doAs(new PrivilegedExceptionAction(){
      @Override public Void run() throws Exception {
        DelegationTokenAuthenticatedURL.Token token = new DelegationTokenAuthenticatedURL.Token();
        DelegationTokenAuthenticatedURL aUrl = new DelegationTokenAuthenticatedURL();
        // Proxying as an allowed user succeeds; the servlet reports OK_USER.
        HttpURLConnection conn = aUrl.openConnection(url, token, OK_USER);
        Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
        List ret = IOUtils.readLines(conn.getInputStream());
        Assert.assertEquals(1, ret.size());
        Assert.assertEquals(OK_USER, ret.get(0));
        // Proxying as a disallowed user is rejected.
        conn = aUrl.openConnection(url, token, FAIL_USER);
        Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN, conn.getResponseCode());
        aUrl.getDelegationToken(url, token, FOO_USER);
        // FIX: the original redeclared a local named "ugi" here, shadowing the
        // enclosing method's "ugi" variable; use a distinct name.
        UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser();
        currentUgi.addToken(token.getDelegationToken());
        token = new DelegationTokenAuthenticatedURL.Token();
        // With the delegation token in the current UGI, the request runs as
        // the token owner (FOO_USER) even though doAs asks for OK_USER.
        conn = aUrl.openConnection(url, token, OK_USER);
        Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
        ret = IOUtils.readLines(conn.getInputStream());
        Assert.assertEquals(1, ret.size());
        Assert.assertEquals(FOO_USER, ret.get(0));
        return null;
      }
    });
  } finally {
    jetty.stop();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that removeService() drops a child service previously added with
 * addIfService(), leaving the remaining children registered.
 */
@Test public void testRemoveService(){
  CompositeService testService = new CompositeService("TestService") {
    @Override public void serviceInit(Configuration conf) {
      // A plain Object must be rejected by addIfService().
      Integer nonService = new Integer(0);
      assertFalse("Added an integer as a service", addIfService(nonService));
      // Register three real services, then remove the first again.
      Service first = new AbstractService("Service1") {
      };
      addIfService(first);
      Service second = new AbstractService("Service2") {
      };
      addIfService(second);
      Service third = new AbstractService("Service3") {
      };
      addIfService(third);
      removeService(first);
    }
  };
  testService.init(new Configuration());
  assertEquals("Incorrect number of services", 2, testService.getServices().size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * addIfService() must reject a non-Service object and accept a real Service,
 * leaving exactly one registered child.
 */
@Test(timeout=1000) public void testAddIfService(){
  CompositeService testService = new CompositeService("TestService") {
    Service service;
    @Override public void serviceInit(Configuration conf) {
      Integer nonService = new Integer(0);
      assertFalse("Added an integer as a service", addIfService(nonService));
      service = new AbstractService("Service") {
      };
      assertTrue("Unable to add a service", addIfService(service));
    }
  };
  testService.init(new Configuration());
  assertEquals("Incorrect number of services", 1, testService.getServices().size());
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * StreamUtil.goodClassOrNull must resolve a class that lives in the default
 * (unnamed) package when it is loaded from a freshly built jar.
 */
@Test public void testGoodClassOrNull() throws Exception {
  String className = "ClassWithNoPackage";
  ClassLoader cl = TestClassWithNoPackage.class.getClassLoader();
  String jarPath = JarFinder.getJar(cl.loadClass(className));
  Configuration conf = new Configuration();
  // Isolate the lookup to the jar (parent class loader deliberately null).
  conf.setClassLoader(new URLClassLoader(new URL[]{new URL("file", null, jarPath)}, null));
  String defaultPackage = this.getClass().getPackage().getName();
  Class c = StreamUtil.goodClassOrNull(conf, className, defaultPackage);
  assertNotNull("Class " + className + " not found!", c);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * DumpTypedBytes must emit all 100 records of a file in a mini DFS cluster to
 * stdout in typed-bytes form; the test captures stdout and decodes it back
 * with TypedBytesInput (keys decode as Long, values as String).
 */
@Test public void testDumping() throws Exception {
Configuration conf=new Configuration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs=cluster.getFileSystem();
// Redirect System.out so the tool's dump can be captured and parsed.
PrintStream psBackup=System.out;
ByteArrayOutputStream out=new ByteArrayOutputStream();
PrintStream psOut=new PrintStream(out);
System.setOut(psOut);
DumpTypedBytes dumptb=new DumpTypedBytes(conf);
try {
Path root=new Path("/typedbytestest");
assertTrue(fs.mkdirs(root));
assertTrue(fs.exists(root));
// Write 100 lines: "0", "10", ..., "990" — every value divisible by 10.
OutputStreamWriter writer=new OutputStreamWriter(fs.create(new Path(root,"test.txt")));
try {
for (int i=0; i < 100; i++) {
writer.write("" + (10 * i) + "\n");
}
}
finally {
writer.close();
}
String[] args=new String[1];
args[0]="/typedbytestest";
int ret=dumptb.run(args);
assertEquals("Return value != 0.",0,ret);
// Decode the captured stream: alternating keys and values until EOF (null).
ByteArrayInputStream in=new ByteArrayInputStream(out.toByteArray());
TypedBytesInput tbinput=new TypedBytesInput(new DataInputStream(in));
int counter=0;
Object key=tbinput.read();
while (key != null) {
assertEquals(Long.class,key.getClass());
Object value=tbinput.read();
assertEquals(String.class,value.getClass());
assertTrue("Invalid output.",Integer.parseInt(value.toString()) % 10 == 0);
counter++;
key=tbinput.read();
}
assertEquals("Wrong number of outputs.",100,counter);
}
finally {
try {
fs.close();
}
catch ( Exception e) {
// best-effort close; the cluster shutdown below is what matters
}
System.setOut(psBackup);
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Streaming job with two -cacheFile entries: both cache files must be
 * localized under their symlink names and their contents must appear in the
 * job output (one line each, tab-terminated).
 * NOTE(review): the BufferedReader opened per output file below is never
 * closed — harmless in a short-lived test JVM but worth fixing.
 */
@Test public void testMultipleCachefiles() throws Exception {
boolean mayExit=false;
MiniMRCluster mr=null;
MiniDFSCluster dfs=null;
try {
Configuration conf=new Configuration();
dfs=new MiniDFSCluster.Builder(conf).build();
FileSystem fileSys=dfs.getFileSystem();
String namenode=fileSys.getUri().toString();
mr=new MiniMRCluster(1,namenode,3);
// Forward every setting of the mini cluster's job conf via -jobconf.
List args=new ArrayList();
for ( Map.Entry entry : mr.createJobConf()) {
args.add("-jobconf");
args.add(entry.getKey() + "=" + entry.getValue());
}
String argv[]=new String[]{"-input",INPUT_FILE,"-output",OUTPUT_DIR,"-mapper",map,"-reducer",reduce,"-jobconf","stream.tmpdir=" + System.getProperty("test.build.data","/tmp"),"-jobconf",JobConf.MAPRED_MAP_TASK_JAVA_OPTS + "=" + "-Dcontrib.name="+ System.getProperty("contrib.name")+ " "+ "-Dbuild.test="+ System.getProperty("build.test")+ " "+ conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS,conf.get(JobConf.MAPRED_TASK_JAVA_OPTS,"")),"-jobconf",JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS + "=" + "-Dcontrib.name="+ System.getProperty("contrib.name")+ " "+ "-Dbuild.test="+ System.getProperty("build.test")+ " "+ conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS,conf.get(JobConf.MAPRED_TASK_JAVA_OPTS,"")),"-cacheFile",fileSys.getUri() + CACHE_FILE + "#"+ mapString,"-cacheFile",fileSys.getUri() + CACHE_FILE_2 + "#"+ mapString2,"-jobconf","mapred.jar=" + TestStreaming.STREAMING_JAR};
for ( String arg : argv) {
args.add(arg);
}
argv=args.toArray(new String[args.size()]);
fileSys.delete(new Path(OUTPUT_DIR),true);
// Input names the two symlinks; the cache files hold the expected payloads.
DataOutputStream file=fileSys.create(new Path(INPUT_FILE));
file.writeBytes(mapString + "\n");
file.writeBytes(mapString2 + "\n");
file.close();
file=fileSys.create(new Path(CACHE_FILE));
file.writeBytes(cacheString + "\n");
file.close();
file=fileSys.create(new Path(CACHE_FILE_2));
file.writeBytes(cacheString2 + "\n");
file.close();
job=new StreamJob(argv,mayExit);
job.go();
fileSys=dfs.getFileSystem();
String line=null;
String line2=null;
// Read the first two lines of each (non-hidden) output part file.
Path[] fileList=FileUtil.stat2Paths(fileSys.listStatus(new Path(OUTPUT_DIR),new Utils.OutputFileUtils.OutputFilesFilter()));
for (int i=0; i < fileList.length; i++) {
System.out.println(fileList[i].toString());
BufferedReader bread=new BufferedReader(new InputStreamReader(fileSys.open(fileList[i])));
line=bread.readLine();
System.out.println(line);
line2=bread.readLine();
System.out.println(line2);
}
// Cache-file contents must have been emitted by the job.
assertEquals(cacheString + "\t",line);
assertEquals(cacheString2 + "\t",line2);
}
finally {
if (dfs != null) {
dfs.shutdown();
}
if (mr != null) {
mr.shutdown();
}
}
}
APIUtilityVerifier EqualityVerifier
/**
 * Runs a streaming job via StreamJob.run(genArgs()) and compares the contents
 * of part-00000 against the expected output.
 */
@Test public void testCommandLine() throws Exception {
  try {
    try {
      FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
    } catch (Exception ignored) {
      // best-effort pre-clean
    }
    createInput();
    OUTPUT_DIR.delete();
    StreamJob streamJob = new StreamJob();
    streamJob.setConf(new Configuration());
    streamJob.run(genArgs());
    File partFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
    String actual = StreamUtil.slurp(partFile);
    partFile.delete();
    System.out.println(" map=" + map);
    System.out.println("reduce=" + reduce);
    System.err.println("outEx1=" + outputExpect);
    System.err.println(" out1=" + actual);
    assertEquals(outputExpect, actual);
  } finally {
    INPUT_FILE.delete();
    FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * Basic command-line regression: run the job through StreamJob.go() and diff
 * part-00000 against outputExpect.
 */
@Test public void testCommandLine() throws Exception {
  try {
    try {
      FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
    } catch (Exception ignored) {
    }
    createInput();
    job = new StreamJob(genArgs(), false);  // mayExit == false
    job.go();
    File partFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
    String actual = StreamUtil.slurp(partFile);
    partFile.delete();
    System.err.println("outEx1=" + outputExpect);
    System.err.println(" out1=" + actual);
    assertEquals(outputExpect, actual);
  } finally {
    INPUT_FILE.delete();
    FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * Command-line run variant whose pre-clean happens outside the main try block
 * and which additionally logs the compareTo() of expected vs. actual.
 */
@Test public void testCommandLine() throws Exception {
  try {
    FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
  } catch (Exception ignored) {
    // best-effort pre-clean
  }
  try {
    createInput();
    job = new StreamJob(genArgs(), false);  // mayExit == false
    job.go();
    File partFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
    String actual = StreamUtil.slurp(partFile);
    partFile.delete();
    System.err.println("outEx1=" + outputExpect);
    System.err.println(" out1=" + actual);
    System.err.println(" equals=" + outputExpect.compareTo(actual));
    assertEquals(outputExpect, actual);
  } finally {
    INPUT_FILE.delete();
    FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * StreamJob.createJob must honor -inputformat, and must fall back to
 * StreamInputFormat when a custom -inputreader is supplied alongside it.
 */
@Test public void testCreateJob() throws IOException {
  JobConf job;
  // FIX: use parameterized collections instead of raw types.
  ArrayList<String> dummyArgs = new ArrayList<String>();
  dummyArgs.add("-input");
  dummyArgs.add("dummy");
  dummyArgs.add("-output");
  dummyArgs.add("dummy");
  dummyArgs.add("-mapper");
  dummyArgs.add("dummy");
  dummyArgs.add("-reducer");
  dummyArgs.add("dummy");
  ArrayList<String> args;
  // Explicit KeyValueTextInputFormat.
  args = new ArrayList<String>(dummyArgs);
  args.add("-inputformat");
  args.add("org.apache.hadoop.mapred.KeyValueTextInputFormat");
  job = StreamJob.createJob(args.toArray(new String[0]));
  assertEquals(KeyValueTextInputFormat.class, job.getInputFormat().getClass());
  // Explicit SequenceFileInputFormat.
  args = new ArrayList<String>(dummyArgs);
  args.add("-inputformat");
  args.add("org.apache.hadoop.mapred.SequenceFileInputFormat");
  job = StreamJob.createJob(args.toArray(new String[0]));
  assertEquals(SequenceFileInputFormat.class, job.getInputFormat().getClass());
  // A custom -inputreader overrides -inputformat with StreamInputFormat.
  args = new ArrayList<String>(dummyArgs);
  args.add("-inputformat");
  args.add("org.apache.hadoop.mapred.KeyValueTextInputFormat");
  args.add("-inputreader");
  args.add("StreamXmlRecordReader,begin=,end= ");
  job = StreamJob.createJob(args.toArray(new String[0]));
  assertEquals(StreamInputFormat.class, job.getInputFormat().getClass());
}
APIUtilityVerifier EqualityVerifier
/**
 * Runs the streaming job and checks part-00000; the part file itself is kept
 * (only the directories are cleaned in the finally block).
 */
@Test public void testCommandLine() throws Exception {
  String partName = "part-00000";
  File partFile = null;
  try {
    try {
      FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
    } catch (Exception ignored) {
    }
    createInput();
    job = new StreamJob(genArgs(), false);  // mayExit == false
    job.go();
    partFile = new File(OUTPUT_DIR, partName).getAbsoluteFile();
    String actual = StreamUtil.slurp(partFile);
    System.err.println("outEx1=" + outputExpect);
    System.err.println(" out1=" + actual);
    assertEquals(outputExpect, actual);
  } finally {
    INPUT_FILE.delete();
    FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
  }
}
APIUtilityVerifier EqualityVerifier
/** Streaming job end-to-end: generated args in, expected text out. */
@Test public void testCommandLine() throws Exception {
  try {
    try {
      FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
    } catch (Exception ignored) {
      // ignore: output directory may not exist yet
    }
    createInput();
    final boolean exitOnError = false;
    job = new StreamJob(genArgs(), exitOnError);
    job.go();
    File result = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
    String actual = StreamUtil.slurp(result);
    result.delete();
    System.err.println("outEx1=" + outputExpect);
    System.err.println(" out1=" + actual);
    assertEquals(outputExpect, actual);
  } finally {
    INPUT_FILE.delete();
    FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A -cacheFile entry with a "#testlink" fragment must be localized under that
 * symlink name; the job's output must contain the cache file's contents.
 * NOTE(review): the BufferedReader opened per output file below is never
 * closed — harmless in a short-lived test JVM but worth fixing.
 */
@Test(timeout=120000) public void testSymLink() throws Exception {
boolean mayExit=false;
MiniMRCluster mr=null;
MiniDFSCluster dfs=null;
try {
Configuration conf=new Configuration();
dfs=new MiniDFSCluster.Builder(conf).build();
FileSystem fileSys=dfs.getFileSystem();
String namenode=fileSys.getUri().toString();
mr=new MiniMRCluster(1,namenode,3);
// Forward every setting of the mini cluster's job conf via -jobconf.
List args=new ArrayList();
for ( Map.Entry entry : mr.createJobConf()) {
args.add("-jobconf");
args.add(entry.getKey() + "=" + entry.getValue());
}
String argv[]=new String[]{"-input",INPUT_FILE,"-output",OUTPUT_DIR,"-mapper",map,"-reducer",reduce,"-jobconf","stream.tmpdir=" + System.getProperty("test.build.data","/tmp"),"-jobconf",JobConf.MAPRED_MAP_TASK_JAVA_OPTS + "=" + "-Dcontrib.name="+ System.getProperty("contrib.name")+ " "+ "-Dbuild.test="+ System.getProperty("build.test")+ " "+ conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS,conf.get(JobConf.MAPRED_TASK_JAVA_OPTS,"")),"-jobconf",JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS + "=" + "-Dcontrib.name="+ System.getProperty("contrib.name")+ " "+ "-Dbuild.test="+ System.getProperty("build.test")+ " "+ conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS,conf.get(JobConf.MAPRED_TASK_JAVA_OPTS,"")),"-cacheFile",fileSys.getUri() + CACHE_FILE + "#testlink","-jobconf","mapred.jar=" + TestStreaming.STREAMING_JAR};
for ( String arg : argv) {
args.add(arg);
}
argv=args.toArray(new String[args.size()]);
fileSys.delete(new Path(OUTPUT_DIR),true);
// Input drives the mapper; the cache file holds the expected payload.
DataOutputStream file=fileSys.create(new Path(INPUT_FILE));
file.writeBytes(mapString);
file.close();
file=fileSys.create(new Path(CACHE_FILE));
file.writeBytes(cacheString);
file.close();
job=new StreamJob(argv,mayExit);
job.go();
fileSys=dfs.getFileSystem();
String line=null;
// Read the first line of each (non-hidden) output part file.
Path[] fileList=FileUtil.stat2Paths(fileSys.listStatus(new Path(OUTPUT_DIR),new Utils.OutputFileUtils.OutputFilesFilter()));
for (int i=0; i < fileList.length; i++) {
System.out.println(fileList[i].toString());
BufferedReader bread=new BufferedReader(new InputStreamReader(fileSys.open(fileList[i])));
line=bread.readLine();
System.out.println(line);
}
// The cache-file contents must have been emitted by the job.
assertEquals(cacheString + "\t",line);
}
finally {
if (dfs != null) {
dfs.shutdown();
}
if (mr != null) {
mr.shutdown();
}
}
}
APIUtilityVerifier EqualityVerifier
/**
 * Minimal command-line run: no cleanup scaffolding, just run the job and
 * compare part-00000 with the expected output.
 */
@Test public void testCommandLine() throws Exception {
  StreamJob streamJob = new StreamJob();
  streamJob.setConf(new Configuration());
  streamJob.run(genArgs());
  File partFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
  String actual = StreamUtil.slurp(partFile);
  partFile.delete();
  System.out.println(" map=" + map);
  System.out.println("reduce=" + reduce);
  System.err.println("outEx1=" + outputExpect);
  System.err.println(" out1=" + actual);
  assertEquals(outputExpect, actual);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A job whose mapper does not consume all of its input must still produce the
 * full expected number of output records (counted via tab separators).
 */
@Test public void testUnconsumedInput() throws Exception {
  String partName = "part-00000";
  File partFile = null;
  try {
    try {
      FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
    } catch (Exception ignored) {
    }
    createInput();
    Configuration conf = new Configuration();
    conf.set("stream.minRecWrittenToEnableSkip_", "0");
    job = new StreamJob();
    job.setConf(conf);
    int rc = job.run(genArgs());
    assertEquals("Job failed", 0, rc);
    partFile = new File(OUTPUT_DIR, partName).getAbsoluteFile();
    String actual = StreamUtil.slurp(partFile);
    assertEquals("Output was truncated", EXPECTED_OUTPUT_SIZE, StringUtils.countMatches(actual, "\t"));
  } finally {
    INPUT_FILE.delete();
    FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * Smoke-tests the embedded Jetty helper: a servlet mounted at /bar must answer
 * 200 with body "foo".
 */
@Test @TestJetty public void testJetty() throws Exception {
  Context context = new Context();
  context.setContextPath("/");
  context.addServlet(MyServlet.class, "/bar");
  Server server = TestJettyHelper.getJettyServer();
  server.addHandler(context);
  server.start();
  URL url = new URL(TestJettyHelper.getJettyURL(), "/bar");
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  // FIX: JUnit's assertEquals takes the expected value first; the original
  // reversed the arguments, garbling failure messages.
  assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
  try {
    assertEquals("foo", reader.readLine());
  } finally {
    // FIX: close the reader even when the assertion fails.
    reader.close();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Writes a single byte to HDFS via the test helper and reads it back,
 * verifying both the byte value and end-of-stream.
 */
@Test @TestHdfs public void testHadoopFileSystem() throws Exception {
  Configuration conf = TestHdfsHelper.getHdfsConf();
  FileSystem fs = FileSystem.get(conf);
  try {
    OutputStream os = fs.create(new Path(TestHdfsHelper.getHdfsTestDir(), "foo"));
    os.write(new byte[]{1});
    os.close();
    InputStream is = fs.open(new Path(TestHdfsHelper.getHdfsTestDir(), "foo"));
    try {
      // FIX: expected value first per JUnit convention.
      assertEquals(1, is.read());
      assertEquals(-1, is.read());
    } finally {
      // FIX: close the stream even when an assertion fails.
      is.close();
    }
  } finally {
    fs.close();
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * Smoke-tests the embedded Jetty helper: a servlet mounted at /bar must answer
 * 200 with body "foo".
 */
@Test @TestJetty public void testJetty() throws Exception {
  Context context = new Context();
  context.setContextPath("/");
  context.addServlet(MyServlet.class, "/bar");
  Server server = TestJettyHelper.getJettyServer();
  server.addHandler(context);
  server.start();
  URL url = new URL(TestJettyHelper.getJettyURL(), "/bar");
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  // FIX: JUnit's assertEquals takes the expected value first; the original
  // reversed the arguments, garbling failure messages.
  assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
  try {
    assertEquals("foo", reader.readLine());
  } finally {
    // FIX: close the reader even when the assertion fails.
    reader.close();
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Forces a deadlock and verifies that TimedOutTestsListener both detects it
 * (3 BLOCKED threads) and includes a thread dump in its failure report.
 */
@Test(timeout=500) public void testThreadDumpAndDeadlocks() throws Exception {
  new Deadlock();
  // Poll until the deadlock detector produces a report.
  String dump;
  while ((dump = TimedOutTestsListener.buildDeadlockInfo()) == null) {
    Thread.sleep(100);
  }
  Assert.assertEquals(3, countStringOccurrences(dump, "BLOCKED"));
  Failure timeoutFailure = new Failure(null, new Exception(TimedOutTestsListener.TEST_TIMED_OUT_PREFIX));
  StringWriter sink = new StringWriter();
  new TimedOutTestsListener(new PrintWriter(sink)).testFailure(timeoutFailure);
  String report = sink.toString();
  Assert.assertTrue(report.contains("THREAD DUMP"));
  Assert.assertTrue(report.contains("DEADLOCKS DETECTED"));
  System.out.println(report);
}
APIUtilityVerifier EqualityVerifier
/** Two unknown users: one report per user, each with no groups. */
@Test public void testMultipleNonExistingUsers() throws Exception {
  String actual = runTool(conf, new String[]{"does-not-exist1", "does-not-exist2"}, true);
  String expected =
      getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist1"))
          + getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist2"));
  assertEquals("Show the output for only the user given, with no groups", expected, actual);
}
APIUtilityVerifier EqualityVerifier
/** A nonexistent user still produces a report line (with no groups). */
@Test public void testNonExistentUser() throws Exception {
  String actual = runTool(conf, new String[]{"does-not-exist"}, true);
  String expected = getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist"));
  assertEquals("Show the output for only the user given, with no groups", expected, actual);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** Two known users: both reports appear, concatenated in argument order. */
@Test public void testMultipleExistingUsers() throws Exception {
  String actual = runTool(conf, new String[]{testUser1.getUserName(), testUser2.getUserName()}, true);
  String expected = getExpectedOutput(testUser1) + getExpectedOutput(testUser2);
  assertEquals("Show the output for both users given", expected, actual);
}
APIUtilityVerifier EqualityVerifier
/** With no arguments the tool reports on the current user. */
@Test public void testNoUserGiven() throws Exception {
  String actual = runTool(conf, new String[0], true);
  UserGroupInformation me = UserGroupInformation.getCurrentUser();
  assertEquals("No user provided should default to current user", getExpectedOutput(me), actual);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** Known and unknown users interleaved keep their command-line order in the output. */
@Test public void testExistingInterleavedWithNonExistentUsers() throws Exception {
  String actual = runTool(conf,
      new String[]{"does-not-exist1", testUser1.getUserName(), "does-not-exist2", testUser2.getUserName()},
      true);
  String expected =
      getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist1"))
          + getExpectedOutput(testUser1)
          + getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist2"))
          + getExpectedOutput(testUser2);
  assertEquals("Show the output for only the user given, with no groups", expected, actual);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** A single known user yields exactly that user's report. */
@Test public void testExistingUser() throws Exception {
  String actual = runTool(conf, new String[]{testUser1.getUserName()}, true);
  String expected = getExpectedOutput(testUser1);
  assertEquals("Show only the output of the user given", expected, actual);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * SimpleCopyListing must honor a shouldCopy() override: the _SUCCESS marker is
 * filtered out while the remaining three paths appear, in order, in the
 * sequence-file listing.
 */
@Test(timeout=10000) public void testSkipCopy() throws Exception {
  SimpleCopyListing listing = new SimpleCopyListing(getConf(), CREDENTIALS) {
    @Override protected boolean shouldCopy(Path path, DistCpOptions options) {
      return !path.getName().equals(FileOutputCommitter.SUCCEEDED_FILE_NAME);
    }
  };
  FileSystem fs = FileSystem.get(getConf());
  // FIX: raw List -> List<Path>.
  List<Path> srcPaths = new ArrayList<Path>();
  srcPaths.add(new Path("/tmp/in4/1"));
  srcPaths.add(new Path("/tmp/in4/2"));
  Path target = new Path("/tmp/out4/1");
  TestDistCpUtils.createFile(fs, "/tmp/in4/1/_SUCCESS");
  TestDistCpUtils.createFile(fs, "/tmp/in4/1/file");
  TestDistCpUtils.createFile(fs, "/tmp/in4/2");
  fs.mkdirs(target);
  DistCpOptions options = new DistCpOptions(srcPaths, target);
  Path listingFile = new Path("/tmp/list4");
  listing.buildListing(listingFile, options);
  // FIX: expected value first per JUnit convention (applies to the asserts below too).
  Assert.assertEquals(3, listing.getNumberOfPaths());
  SequenceFile.Reader reader = new SequenceFile.Reader(getConf(), SequenceFile.Reader.file(listingFile));
  try {
    CopyListingFileStatus fileStatus = new CopyListingFileStatus();
    Text relativePath = new Text();
    Assert.assertTrue(reader.next(relativePath, fileStatus));
    Assert.assertEquals("/1", relativePath.toString());
    Assert.assertTrue(reader.next(relativePath, fileStatus));
    Assert.assertEquals("/1/file", relativePath.toString());
    Assert.assertTrue(reader.next(relativePath, fileStatus));
    Assert.assertEquals("/2", relativePath.toString());
    Assert.assertFalse(reader.next(relativePath, fileStatus));
  } finally {
    // FIX: the reader was never closed, leaking the underlying stream.
    reader.close();
  }
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Building a copy listing for a single source file must yield exactly one
 * entry whose relative path is empty (the file maps directly onto the target).
 */
@Test(timeout=10000) public void testBuildListingForSingleFile(){
  FileSystem fs = null;
  String testRootString = "/singleFileListing";
  Path testRoot = new Path(testRootString);
  SequenceFile.Reader reader = null;
  try {
    fs = FileSystem.get(getConf());
    if (fs.exists(testRoot)) TestDistCpUtils.delete(fs, testRootString);
    Path sourceFile = new Path(testRoot, "/source/foo/bar/source.txt");
    // A decoy with the same basename under the target tree must not confuse the listing.
    Path decoyFile = new Path(testRoot, "/target/moo/source.txt");
    Path targetFile = new Path(testRoot, "/target/moo/target.txt");
    TestDistCpUtils.createFile(fs, sourceFile.toString());
    TestDistCpUtils.createFile(fs, decoyFile.toString());
    TestDistCpUtils.createFile(fs, targetFile.toString());
    // FIX: raw List -> List<Path>.
    List<Path> srcPaths = new ArrayList<Path>();
    srcPaths.add(sourceFile);
    DistCpOptions options = new DistCpOptions(srcPaths, targetFile);
    CopyListing listing = new SimpleCopyListing(getConf(), CREDENTIALS);
    final Path listFile = new Path(testRoot, "/tmp/fileList.seq");
    listing.buildListing(listFile, options);
    reader = new SequenceFile.Reader(getConf(), SequenceFile.Reader.file(listFile));
    CopyListingFileStatus fileStatus = new CopyListingFileStatus();
    Text relativePath = new Text();
    Assert.assertTrue(reader.next(relativePath, fileStatus));
    Assert.assertTrue(relativePath.toString().equals(""));
  } catch (Exception e) {
    // FIX: Assert.fail() throws, so the original's LOG.error placed after it
    // was unreachable; log the exception first, then fail.
    LOG.error("Unexpected exception: ", e);
    Assert.fail("Unexpected exception encountered.");
  } finally {
    TestDistCpUtils.delete(fs, testRootString);
    IOUtils.closeStream(reader);
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises DistCp.main(), which is expected to call System.exit(); the test
 * harness converts that into an ExitException so the exit status and the
 * staging-directory cleanup can be verified.
 */
@Test public void testCleanupTestViaToolRunner() throws IOException, InterruptedException {
  Configuration conf = getConf();
  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);
  // FIX: typo "soure" -> "source".
  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");
  try {
    String[] arg = {target.toString(), source.toString()};
    DistCp.main(arg);
    Assert.fail();
  } catch (ExitException t) {
    Assert.assertTrue(fs.exists(target));
    // FIX: expected value first per JUnit convention.
    Assert.assertEquals(0, t.status);
    // The staging directory must be empty after the run.
    Assert.assertEquals(0, stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * Archives an input tree that includes a file created under a
 * subdirectory, then checks that listing the archive yields exactly the
 * same paths as listing the original input directory.
 */
@Test
public void testRelativePath() throws Exception {
  final Path dir1 = new Path(inputPath, "dir1");
  fs.mkdirs(dir1);
  createFile(inputPath, fs, dir1.getName(), "a");
  final FsShell shell = new FsShell(conf);
  final List originalPaths = lsr(shell, "input");
  System.out.println("originalPaths: " + originalPaths);
  // Build the har archive and compare its recursive listing with that of
  // the original tree.
  final String fullHarPathStr = makeArchive();
  final List harPaths = lsr(shell, fullHarPathStr);
  Assert.assertEquals(originalPaths, harPaths);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Ensures har archiving round-trips paths whose components contain
 * spaces, including a sibling directory whose name extends another
 * ("sub 1" vs "sub 1 with suffix") to guard against prefix-match bugs.
 */
@Test
public void testPathWithSpaces() throws Exception {
  createFile(inputPath, fs, "c c");
  final Path spacedDir = new Path(inputPath, "sub 1");
  fs.mkdirs(spacedDir);
  createFile(spacedDir, fs, "file x y z");
  createFile(spacedDir, fs, "file");
  createFile(spacedDir, fs, "x");
  createFile(spacedDir, fs, "y");
  createFile(spacedDir, fs, "z");
  // A directory whose name is "sub 1" plus a suffix must remain distinct.
  final Path suffixedDir = new Path(inputPath, "sub 1 with suffix");
  fs.mkdirs(suffixedDir);
  createFile(suffixedDir, fs, "z");
  final FsShell shell = new FsShell(conf);
  final String inputPathStr = inputPath.toUri().getPath();
  final List unarchivedPaths = lsr(shell, inputPathStr);
  final String fullHarPathStr = makeArchive();
  final List archivedPaths = lsr(shell, fullHarPathStr);
  Assert.assertEquals(unarchivedPaths, archivedPaths);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that a DistCp run that fails (the source URI uses an unusable
 * scheme) still cleans up its staging directory.
 */
@Test(timeout=100000)
public void testCleanup() {
  try {
    Path sourcePath = new Path("noscheme:///file");
    List<Path> sources = new ArrayList<Path>();
    sources.add(sourcePath);
    DistCpOptions options = new DistCpOptions(sources, target);
    Configuration conf = getConf();
    Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
    stagingDir.getFileSystem(conf).mkdirs(stagingDir);
    boolean executionFailed = false;
    try {
      new DistCp(conf, options).execute();
    } catch (Throwable t) {
      executionFailed = true;
      // Expected failure: the staging directory must have been emptied.
      Assert.assertEquals(0, stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
    }
    // The original silently passed when execute() did not throw, skipping
    // the staging-cleanup assertion entirely.
    Assert.assertTrue("DistCp should have failed for the bogus scheme", executionFailed);
  } catch (Exception e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("testCleanup failed " + e.getMessage());
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests the JMX connection to the DataNode: after writing a file the
 * DataNode's "BytesWritten" metric must equal the file size, and after
 * cluster shutdown no DataNode MBeans may remain registered.
 *
 * NOTE(review): the cluster is not shut down in a finally block, so an
 * assertion failure mid-test leaks the MiniDFSCluster. The shutdown()
 * call must stay BEFORE the MBean query, since the final assertion
 * verifies bean unregistration on shutdown.
 * @throws Exception
 */
@Test public void testDataNode() throws Exception {
int numDatanodes=2;
cluster=new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build();
cluster.waitActive();
// Write fileSize bytes so "BytesWritten" has a known expected value.
DFSTestUtil.createFile(cluster.getFileSystem(),new Path("/test"),fileSize,fileSize,blockSize,(short)2,seed);
JMXGet jmx=new JMXGet();
String serviceName="DataNode";
jmx.setService(serviceName);
jmx.init();
assertEquals(fileSize,Integer.parseInt(jmx.getValue("BytesWritten")));
cluster.shutdown();
// After shutdown the platform MBean server must hold no DataNode beans.
MBeanServerConnection mbsc=ManagementFactory.getPlatformMBeanServer();
ObjectName query=new ObjectName("Hadoop:service=" + serviceName + ",*");
Set names=mbsc.queryNames(query,null);
assertTrue("No beans should be registered for " + serviceName,names.isEmpty());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests the JMX connection to the NameNode: checks that all metric
 * values can be printed, that live-datanode and open-connection counts
 * match the cluster size, and that NameNode MBeans are unregistered
 * after shutdown.
 *
 * NOTE(review): the cluster is not shut down in a finally block, so an
 * assertion failure leaks the MiniDFSCluster; shutdown() must remain
 * before the MBean query because the last assertion checks bean
 * unregistration.
 * @throws Exception
 */
@Test public void testNameNode() throws Exception {
int numDatanodes=2;
cluster=new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build();
cluster.waitActive();
DFSTestUtil.createFile(cluster.getFileSystem(),new Path("/test1"),fileSize,fileSize,blockSize,(short)2,seed);
JMXGet jmx=new JMXGet();
String serviceName="NameNode";
jmx.setService(serviceName);
jmx.init();
assertTrue("error printAllValues",checkPrintAllValues(jmx));
assertEquals(numDatanodes,Integer.parseInt(jmx.getValue("NumLiveDataNodes")));
// Cross-check the JMX value against the metrics subsystem.
assertGauge("CorruptBlocks",Long.parseLong(jmx.getValue("CorruptBlocks")),getMetrics("FSNamesystem"));
// NOTE(review): assumes each datanode holds exactly one open connection
// to the NameNode at this point — TODO confirm.
assertEquals(numDatanodes,Integer.parseInt(jmx.getValue("NumOpenConnections")));
cluster.shutdown();
// After shutdown the platform MBean server must hold no NameNode beans.
MBeanServerConnection mbsc=ManagementFactory.getPlatformMBeanServer();
ObjectName query=new ObjectName("Hadoop:service=" + serviceName + ",*");
Set names=mbsc.queryNames(query,null);
assertTrue("No beans should be registered for " + serviceName,names.isEmpty());
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Delete-missing with interleaved flat file sets: after commitJob, files
 * present in the target but absent from the source must be removed so
 * the trees are in sync; a second commit must be idempotent.
 */
@Test public void testDeleteMissingFlatInterleavedFiles(){
TaskAttemptContext taskAttemptContext=getTaskAttemptContext(config);
JobContext jobContext=new JobContextImpl(taskAttemptContext.getConfiguration(),taskAttemptContext.getTaskAttemptID().getJobID());
Configuration conf=jobContext.getConfiguration();
String sourceBase;
String targetBase;
FileSystem fs=null;
try {
OutputCommitter committer=new CopyCommitter(null,taskAttemptContext);
fs=FileSystem.get(conf);
sourceBase="/tmp1/" + String.valueOf(rand.nextLong());
targetBase="/tmp1/" + String.valueOf(rand.nextLong());
// Source holds 1,3,4,5,7,8,9; target holds 2,4,5,7,9,A. Entries 2 and A
// exist only in the target and must be deleted by the commit.
TestDistCpUtils.createFile(fs,sourceBase + "/1");
TestDistCpUtils.createFile(fs,sourceBase + "/3");
TestDistCpUtils.createFile(fs,sourceBase + "/4");
TestDistCpUtils.createFile(fs,sourceBase + "/5");
TestDistCpUtils.createFile(fs,sourceBase + "/7");
TestDistCpUtils.createFile(fs,sourceBase + "/8");
TestDistCpUtils.createFile(fs,sourceBase + "/9");
TestDistCpUtils.createFile(fs,targetBase + "/2");
TestDistCpUtils.createFile(fs,targetBase + "/4");
TestDistCpUtils.createFile(fs,targetBase + "/5");
TestDistCpUtils.createFile(fs,targetBase + "/7");
TestDistCpUtils.createFile(fs,targetBase + "/9");
TestDistCpUtils.createFile(fs,targetBase + "/A");
DistCpOptions options=new DistCpOptions(Arrays.asList(new Path(sourceBase)),new Path("/out"));
options.setSyncFolder(true);
options.setDeleteMissing(true);
options.appendToConf(conf);
// The committer reads the source listing from this file during commit.
CopyListing listing=new GlobbedCopyListing(conf,CREDENTIALS);
Path listingFile=new Path("/tmp1/" + String.valueOf(rand.nextLong()));
listing.buildListing(listingFile,options);
conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,targetBase);
conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,targetBase);
committer.commitJob(jobContext);
if (!TestDistCpUtils.checkIfFoldersAreInSync(fs,targetBase,sourceBase)) {
Assert.fail("Source and target folders are not in sync");
}
// Only the 4 entries common to both sets (4,5,7,9) survive the delete.
Assert.assertEquals(fs.listStatus(new Path(targetBase)).length,4);
// A second commit must be a no-op with the same end state (idempotence).
committer.commitJob(jobContext);
if (!TestDistCpUtils.checkIfFoldersAreInSync(fs,targetBase,sourceBase)) {
Assert.fail("Source and target folders are not in sync");
}
Assert.assertEquals(fs.listStatus(new Path(targetBase)).length,4);
}
catch ( IOException e) {
LOG.error("Exception encountered while testing for delete missing",e);
Assert.fail("Delete missing failure");
}
finally {
// Clean up test data and reset the flag so later tests are unaffected.
TestDistCpUtils.delete(fs,"/tmp1");
conf.set(DistCpConstants.CONF_LABEL_DELETE_MISSING,"false");
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Atomic commit when the final path already exists: commitJob must fail
 * (it cannot atomically rename onto an existing path), leaving both the
 * work and final directories untouched.
 */
@Test public void testAtomicCommitExistingFinal(){
TaskAttemptContext taskAttemptContext=getTaskAttemptContext(config);
JobContext jobContext=new JobContextImpl(taskAttemptContext.getConfiguration(),taskAttemptContext.getTaskAttemptID().getJobID());
Configuration conf=jobContext.getConfiguration();
String workPath="/tmp1/" + String.valueOf(rand.nextLong());
String finalPath="/tmp1/" + String.valueOf(rand.nextLong());
FileSystem fs=null;
try {
OutputCommitter committer=new CopyCommitter(null,taskAttemptContext);
fs=FileSystem.get(conf);
// Pre-create BOTH directories: the pre-existing final path is what must
// make the atomic commit fail.
fs.mkdirs(new Path(workPath));
fs.mkdirs(new Path(finalPath));
conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,workPath);
conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,finalPath);
conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY,true);
Assert.assertTrue(fs.exists(new Path(workPath)));
Assert.assertTrue(fs.exists(new Path(finalPath)));
try {
committer.commitJob(jobContext);
Assert.fail("Should not be able to atomic-commit to pre-existing path.");
}
catch ( Exception exception) {
// Expected: the failed commit must leave both paths in place.
Assert.assertTrue(fs.exists(new Path(workPath)));
Assert.assertTrue(fs.exists(new Path(finalPath)));
LOG.info("Atomic-commit Test pass.");
}
}
catch ( IOException e) {
LOG.error("Exception encountered while testing for atomic commit.",e);
Assert.fail("Atomic commit failure");
}
finally {
// Remove test dirs and reset the atomic-copy flag for later tests.
TestDistCpUtils.delete(fs,workPath);
TestDistCpUtils.delete(fs,finalPath);
conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY,false);
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Atomic commit when the final path does not yet exist: commitJob must
 * rename the work directory to the final path, and a second commit must
 * be idempotent, leaving the final path in place.
 */
@Test
public void testAtomicCommitMissingFinal() {
  TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
  JobContext jobContext = new JobContextImpl(
      taskAttemptContext.getConfiguration(),
      taskAttemptContext.getTaskAttemptID().getJobID());
  Configuration conf = jobContext.getConfiguration();
  String workPath = "/tmp1/" + String.valueOf(rand.nextLong());
  String finalPath = "/tmp1/" + String.valueOf(rand.nextLong());
  FileSystem fs = null;
  try {
    OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
    fs = FileSystem.get(conf);
    fs.mkdirs(new Path(workPath));
    conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, workPath);
    conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, finalPath);
    conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, true);
    Assert.assertTrue(fs.exists(new Path(workPath)));
    Assert.assertFalse(fs.exists(new Path(finalPath)));
    committer.commitJob(jobContext);
    // The commit renames work -> final.
    Assert.assertFalse(fs.exists(new Path(workPath)));
    Assert.assertTrue(fs.exists(new Path(finalPath)));
    // A repeated commit must be a no-op with the same end state.
    committer.commitJob(jobContext);
    Assert.assertFalse(fs.exists(new Path(workPath)));
    Assert.assertTrue(fs.exists(new Path(finalPath)));
  } catch (IOException e) {
    // Fixed copy-paste in the log message: this test covers atomic
    // commit, not preserve-status.
    LOG.error("Exception encountered while testing for atomic commit.", e);
    Assert.fail("Atomic commit failure");
  } finally {
    TestDistCpUtils.delete(fs, workPath);
    TestDistCpUtils.delete(fs, finalPath);
    conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, false);
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * With no work/final paths configured, commitJob must be a no-op that
 * still reports success, and committing twice must be idempotent.
 */
@Test
public void testNoCommitAction() {
  TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
  JobContext jobContext = new JobContextImpl(
      taskAttemptContext.getConfiguration(),
      taskAttemptContext.getTaskAttemptID().getJobID());
  try {
    OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
    committer.commitJob(jobContext);
    // JUnit convention: expected value first.
    Assert.assertEquals("Commit Successful", taskAttemptContext.getStatus());
    // Re-committing the same job must succeed as well.
    committer.commitJob(jobContext);
    Assert.assertEquals("Commit Successful", taskAttemptContext.getStatus());
  } catch (IOException e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("Commit failed");
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * If a single file is copied to a location where a file of the same name
 * already exists, the copy must be skipped (mtime unchanged); if the
 * configured target is the file itself rather than its parent, the file
 * must be overwritten (mtime advances).
 */
@Test(timeout=40000)
public void testSingleFileCopy() {
  try {
    deleteState();
    touchFile(SOURCE_PATH + "/1");
    Path sourceFilePath = pathList.get(0);
    Path targetFilePath = new Path(
        sourceFilePath.toString().replaceAll(SOURCE_PATH, TARGET_PATH));
    touchFile(targetFilePath.toString());
    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper.Context context = stubContext.getContext();
    // Case 1: target is the parent directory — the pre-existing
    // same-named file should make the copy a no-op.
    context.getConfiguration().set(
        DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,
        targetFilePath.getParent().toString());
    copyMapper.setup(context);
    final CopyListingFileStatus sourceFileStatus =
        new CopyListingFileStatus(fs.getFileStatus(sourceFilePath));
    long before = fs.getFileStatus(targetFilePath).getModificationTime();
    copyMapper.map(
        new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), sourceFilePath)),
        sourceFileStatus, context);
    long after = fs.getFileStatus(targetFilePath).getModificationTime();
    Assert.assertTrue("File should have been skipped", before == after);
    // Case 2: target is the file itself — the copy must overwrite.
    context.getConfiguration().set(
        DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, targetFilePath.toString());
    copyMapper.setup(context);
    before = fs.getFileStatus(targetFilePath).getModificationTime();
    try {
      Thread.sleep(2); // ensure a measurable mtime difference
    } catch (Throwable ignore) {
    }
    copyMapper.map(
        new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), sourceFilePath)),
        sourceFileStatus, context);
    after = fs.getFileStatus(targetFilePath).getModificationTime();
    Assert.assertTrue("File should have been overwritten.", before < after);
  } catch (Exception exception) {
    // Print the stack trace BEFORE failing: Assert.fail() throws, so the
    // original ordering (fail first) never printed the trace.
    exception.printStackTrace();
    Assert.fail("Unexpected exception: " + exception.getMessage());
  }
}
APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * There should be files in the directory named by
 * ${test.build.data}/rumen/histogram-tests: pairs of inputXxx.json and
 * goldXxx.json. Each input file is read as a HistogramRawTestData in
 * json; a Histogram is built from its data field and turned into a
 * LoggedDiscreteCDF using the percentiles and scale fields. The
 * corresponding goldXxx.json is read as a LoggedDiscreteCDF and the two
 * are deep-compared.
 * @throws IOException
 */
@Test
public void testHistograms() throws IOException {
  final Configuration conf = new Configuration();
  final FileSystem lfs = FileSystem.getLocal(conf);
  final Path rootInputDir =
      new Path(System.getProperty("test.tools.input.dir", "")).makeQualified(lfs);
  final Path rootInputFile = new Path(rootInputDir, "rumen/histogram-tests");
  FileStatus[] tests = lfs.listStatus(rootInputFile);
  for (int i = 0; i < tests.length; ++i) {
    Path filePath = tests[i].getPath();
    String fileName = filePath.getName();
    if (fileName.startsWith("input")) {
      String testName = fileName.substring("input".length());
      Path goldFilePath = new Path(rootInputFile, "gold" + testName);
      // Fixed assertion-message typo: "dies not exist" -> "does not exist".
      assertTrue("Gold file does not exist", lfs.exists(goldFilePath));
      LoggedDiscreteCDF newResult = histogramFileToCDF(filePath, lfs);
      System.out.println("Testing a Histogram for " + fileName);
      FSDataInputStream goldStream = lfs.open(goldFilePath);
      JsonObjectMapperParser parser =
          new JsonObjectMapperParser(goldStream, LoggedDiscreteCDF.class);
      try {
        LoggedDiscreteCDF dcdf = parser.getNext();
        dcdf.deepCompare(newResult, new TreePath(null, ""));
      } catch (DeepInequalityException e) {
        fail(e.path.toString());
      } finally {
        parser.close();
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Draws one million samples from a piecewise-linear CDF generator built
 * from a 3-point CDF (10%/50%/90%), rebuilds a 99-percentile CDF from
 * the samples, and checks that the RMS relative error per bucket against
 * the analytically expected piecewise-linear values stays within
 * maximumRelativeError.
 */
@Test public void testOneRun(){
LoggedDiscreteCDF input=new LoggedDiscreteCDF();
input.setMinimum(100000L);
input.setMaximum(1100000L);
ArrayList rankings=new ArrayList();
rankings.add(makeRR(0.1,200000L));
rankings.add(makeRR(0.5,800000L));
rankings.add(makeRR(0.9,1000000L));
input.setRankings(rankings);
input.setNumberValues(3);
CDFRandomGenerator gen=new CDFPiecewiseLinearRandomGenerator(input);
Histogram values=new Histogram();
// Sample the generator heavily so the empirical CDF is stable.
for (int i=0; i < 1000000; ++i) {
long value=gen.randomValue();
values.enter(value);
}
int[] percentiles=new int[99];
for (int i=0; i < 99; ++i) {
percentiles[i]=i + 1;
}
long[] result=values.getCDF(100,percentiles);
long sumErrorSquares=0L;
// Each loop compares a linear segment of the expected CDF:
// 0-10%: slope 10000/percentile starting at the minimum (100000).
for (int i=0; i < 10; ++i) {
long error=result[i] - (10000L * i + 100000L);
System.out.println("element " + i + ", got "+ result[i]+ ", expected "+ (10000L * i + 100000L)+ ", error = "+ error);
sumErrorSquares+=error * error;
}
// 10-50%: slope 15000 per percentile.
for (int i=10; i < 50; ++i) {
long error=result[i] - (15000L * i + 50000L);
System.out.println("element " + i + ", got "+ result[i]+ ", expected "+ (15000L * i + 50000L)+ ", error = "+ error);
sumErrorSquares+=error * error;
}
// 50-90%: slope 5000 per percentile.
for (int i=50; i < 90; ++i) {
long error=result[i] - (5000L * i + 550000L);
System.out.println("element " + i + ", got "+ result[i]+ ", expected "+ (5000L * i + 550000L)+ ", error = "+ error);
sumErrorSquares+=error * error;
}
// 90-100%: slope 10000 per percentile up to the maximum (1100000).
// NOTE(review): this loop indexes result[100]; it assumes getCDF returns
// at least 101 entries for 99 requested percentiles — TODO confirm.
for (int i=90; i <= 100; ++i) {
long error=result[i] - (10000L * i + 100000L);
System.out.println("element " + i + ", got "+ result[i]+ ", expected "+ (10000L * i + 100000L)+ ", error = "+ error);
sumErrorSquares+=error * error;
}
double realSumErrorSquares=(double)sumErrorSquares;
// Normalize the error by the median datum so the tolerance is relative.
double normalizedError=realSumErrorSquares / 100 / rankings.get(1).getDatum()/ rankings.get(1).getDatum();
double RMSNormalizedError=Math.sqrt(normalizedError);
System.out.println("sumErrorSquares = " + sumErrorSquares);
System.out.println("normalizedError: " + normalizedError + ", RMSNormalizedError: "+ RMSNormalizedError);
System.out.println("Cumulative error is " + RMSNormalizedError);
assertTrue("The RMS relative error per bucket, " + RMSNormalizedError + ", exceeds our tolerance of "+ maximumRelativeError,RMSNormalizedError <= maximumRelativeError);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * DistCpUtils.preserve must apply only the requested file attributes:
 * with no attributes nothing changes; adding PERMISSION restores the
 * source permission; adding USER and GROUP restores ownership too.
 */
@Test
public void testPreserve() {
  try {
    FileSystem fs = FileSystem.get(config);
    EnumSet<FileAttribute> attributes = EnumSet.noneOf(FileAttribute.class);
    Path path = new Path("/tmp/abc");
    Path src = new Path("/tmp/src");
    fs.mkdirs(path);
    fs.mkdirs(src);
    CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
    // Strip permissions and set a distinct owner so changes are observable.
    FsPermission noPerm = new FsPermission((short) 0);
    fs.setPermission(path, noPerm);
    fs.setOwner(path, "nobody", "nobody");
    // No attributes requested: nothing may change.
    DistCpUtils.preserve(fs, path, srcStatus, attributes, false);
    FileStatus target = fs.getFileStatus(path);
    // JUnit convention throughout: expected value first (the original had
    // every expected/actual pair reversed).
    Assert.assertEquals(noPerm, target.getPermission());
    Assert.assertEquals("nobody", target.getOwner());
    Assert.assertEquals("nobody", target.getGroup());
    // PERMISSION only: permission restored, ownership untouched.
    attributes.add(FileAttribute.PERMISSION);
    DistCpUtils.preserve(fs, path, srcStatus, attributes, false);
    target = fs.getFileStatus(path);
    Assert.assertEquals(srcStatus.getPermission(), target.getPermission());
    Assert.assertEquals("nobody", target.getOwner());
    Assert.assertEquals("nobody", target.getGroup());
    // Adding GROUP and USER restores ownership as well.
    attributes.add(FileAttribute.GROUP);
    attributes.add(FileAttribute.USER);
    DistCpUtils.preserve(fs, path, srcStatus, attributes, false);
    target = fs.getFileStatus(path);
    Assert.assertEquals(srcStatus.getPermission(), target.getPermission());
    Assert.assertEquals(srcStatus.getOwner(), target.getOwner());
    Assert.assertEquals(srcStatus.getGroup(), target.getGroup());
    fs.delete(path, true);
    fs.delete(src, true);
  } catch (IOException e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("Preserve test failure");
  }
}
APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Resources inside the test jar must be visible through the
 * ApplicationClassLoader but not through its parent.
 */
@Test
public void testGetResource() throws IOException {
  URL testJar = makeTestJar().toURI().toURL();
  ClassLoader currentClassLoader = getClass().getClassLoader();
  ClassLoader appClassloader =
      new ApplicationClassLoader(new URL[]{testJar}, currentClassLoader, null);
  assertNull("Resource should be null for current classloader",
      currentClassLoader.getResourceAsStream("resource.txt"));
  InputStream in = appClassloader.getResourceAsStream("resource.txt");
  assertNotNull("Resource should not be null for app classloader", in);
  // Close the stream once consumed; the original leaked it.
  try {
    assertEquals("hello", IOUtils.toString(in));
  } finally {
    in.close();
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * constructUrlsFromClasspath must produce a URL per existing file, a URL
 * per directory, and a URL per jar matched by a "/*" wildcard, while
 * silently skipping entries that do not exist.
 */
@Test
public void testConstructUrlsFromClasspath() throws Exception {
  File file = new File(testDir, "file");
  assertTrue("Create file", file.createNewFile());
  File dir = new File(testDir, "dir");
  assertTrue("Make dir", dir.mkdir());
  File jarsDir = new File(testDir, "jarsdir");
  assertTrue("Make jarsDir", jarsDir.mkdir());
  File nonJarFile = new File(jarsDir, "nonjar");
  assertTrue("Create non-jar file", nonJarFile.createNewFile());
  File jarFile = new File(jarsDir, "a.jar");
  assertTrue("Create jar file", jarFile.createNewFile());
  // Deliberately never created: nonexistent entries must be skipped.
  File nofile = new File(testDir, "nofile");
  StringBuilder classpath = new StringBuilder();
  classpath.append(file.getAbsolutePath()).append(File.pathSeparator);
  classpath.append(dir.getAbsolutePath()).append(File.pathSeparator);
  classpath.append(jarsDir.getAbsolutePath() + "/*").append(File.pathSeparator);
  classpath.append(nofile.getAbsolutePath()).append(File.pathSeparator);
  classpath.append(nofile.getAbsolutePath() + "/*").append(File.pathSeparator);
  URL[] urls = constructUrlsFromClasspath(classpath.toString());
  // Only the file, the directory, and the one jar under the wildcard survive.
  assertEquals(3, urls.length);
  assertEquals(file.toURI().toURL(), urls[0]);
  assertEquals(dir.toURI().toURL(), urls[1]);
  assertEquals(jarFile.toURI().toURL(), urls[2]);
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * An unrecognized command-line option must terminate the tool with
 * nothing on stdout and an "unrecognized option" message on stderr.
 */
@Test
public void testUnrecognized() {
  try {
    Classpath.main(new String[]{"--notarealoption"});
    fail("expected exit");
  } catch (ExitUtil.ExitException e) {
    // assertEquals gives the actual length on failure, unlike the
    // original assertTrue(length == 0).
    assertEquals(0, stdout.toByteArray().length);
    String strErr = new String(stderr.toByteArray(), UTF8);
    assertTrue(strErr.contains("unrecognized option"));
  }
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * "--jar" without a path must terminate with nothing on stdout and a
 * "requires path of jar" message on stderr.
 */
@Test
public void testJarFileMissing() throws IOException {
  try {
    Classpath.main(new String[]{"--jar"});
    fail("expected exit");
  } catch (ExitUtil.ExitException e) {
    // assertEquals gives the actual length on failure, unlike the
    // original assertTrue(length == 0).
    assertEquals(0, stdout.toByteArray().length);
    String strErr = new String(stderr.toByteArray(), UTF8);
    assertTrue(strErr.contains("requires path of jar"));
  }
}
APIUtilityVerifier BooleanVerifier
/**
 * "-h" must print the usage text on stdout and write nothing to stderr.
 */
@Test
public void testHelpShort() {
  Classpath.main(new String[]{"-h"});
  String strOut = new String(stdout.toByteArray(), UTF8);
  assertTrue(strOut.contains("Prints the classpath"));
  // assertEquals reports the stray length on failure, unlike
  // assertTrue(length == 0).
  assertEquals(0, stderr.toByteArray().length);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * "--glob" must print the JVM's own classpath on stdout and write
 * nothing to stderr.
 */
@Test
public void testGlob() {
  Classpath.main(new String[]{"--glob"});
  String strOut = new String(stdout.toByteArray(), UTF8);
  assertEquals(System.getProperty("java.class.path"), strOut.trim());
  // assertEquals reports the stray length on failure, unlike
  // assertTrue(length == 0).
  assertEquals(0, stderr.toByteArray().length);
}
APIUtilityVerifier BooleanVerifier
/**
 * "--help" must print the usage text on stdout and write nothing to
 * stderr.
 */
@Test
public void testHelp() {
  Classpath.main(new String[]{"--help"});
  String strOut = new String(stdout.toByteArray(), UTF8);
  assertTrue(strOut.contains("Prints the classpath"));
  // assertEquals reports the stray length on failure, unlike
  // assertTrue(length == 0).
  assertEquals(0, stderr.toByteArray().length);
}
APIUtilityVerifier NullVerifier
/**
 * Jarring a directory that already contains META-INF/MANIFEST.MF must
 * produce a jar whose manifest is readable.
 */
@Test
public void testExistingManifest() throws Exception {
  File dir = new File(System.getProperty("test.build.dir", "target/test-dir"),
      TestJarFinder.class.getName() + "-testExistingManifest");
  delete(dir);
  dir.mkdirs();
  File metaInfDir = new File(dir, "META-INF");
  metaInfDir.mkdirs();
  File manifestFile = new File(metaInfDir, "MANIFEST.MF");
  Manifest manifest = new Manifest();
  // try-with-resources closes the streams even when write/store throws;
  // the original leaked them on the exception path.
  try (OutputStream os = new FileOutputStream(manifestFile)) {
    manifest.write(os);
  }
  File propsFile = new File(dir, "props.properties");
  try (Writer writer = new FileWriter(propsFile)) {
    new Properties().store(writer, "");
  }
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  JarOutputStream zos = new JarOutputStream(baos);
  JarFinder.jarDir(dir, "", zos);
  try (JarInputStream jis =
      new JarInputStream(new ByteArrayInputStream(baos.toByteArray()))) {
    Assert.assertNotNull(jis.getManifest());
  }
}
APIUtilityVerifier NullVerifier
/**
 * Jarring a directory with no manifest must still produce a jar with a
 * (generated) readable manifest.
 */
@Test
public void testNoManifest() throws Exception {
  File dir = new File(System.getProperty("test.build.dir", "target/test-dir"),
      TestJarFinder.class.getName() + "-testNoManifest");
  delete(dir);
  dir.mkdirs();
  File propsFile = new File(dir, "props.properties");
  // try-with-resources closes the writer even when store throws; the
  // original leaked it on the exception path.
  try (Writer writer = new FileWriter(propsFile)) {
    new Properties().store(writer, "");
  }
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  JarOutputStream zos = new JarOutputStream(baos);
  JarFinder.jarDir(dir, "", zos);
  try (JarInputStream jis =
      new JarInputStream(new ByteArrayInputStream(baos.toByteArray()))) {
    Assert.assertNotNull(jis.getManifest());
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Fills a LightWeightGSet with 100 random elements, removes via
 * Iterator.remove() every element above the mean value, and verifies
 * only elements at or below the mean remain.
 */
@Test(timeout=60000)
public void testRemoveSomeViaIterator() {
  // Restored type parameters: with the raw types in the original,
  // `for (Integer i : list)` and `iter.next().getVal()` do not compile
  // (the raw iterator yields Object).
  ArrayList<Integer> list = getRandomList(100, 123);
  LightWeightGSet<TestElement, TestElement> set =
      new LightWeightGSet<TestElement, TestElement>(16);
  for (Integer i : list) {
    set.put(new TestElement(i));
  }
  long sum = 0;
  for (Iterator<TestElement> iter = set.iterator(); iter.hasNext();) {
    sum += iter.next().getVal();
  }
  long mode = sum / set.size();
  LOG.info("Removing all elements above " + mode);
  for (Iterator<TestElement> iter = set.iterator(); iter.hasNext();) {
    int item = iter.next().getVal();
    if (item > mode) {
      iter.remove();
    }
  }
  for (Iterator<TestElement> iter = set.iterator(); iter.hasNext();) {
    Assert.assertTrue(iter.next().getVal() <= mode);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Exercises LineReader with a custom record delimiter, including a
 * delimiter that straddles the internal 64KB buffer boundary and inputs
 * with back-to-back, partial, and trailing delimiter fragments.
 *
 * NOTE(review): Delimiter, TestData, lineReader and line appear to be
 * fields of the test class (they are assigned without declarations).
 * NOTE(review): Delimiter is set to the empty string in the first phase,
 * which makes the replace() and the delimiter bytes no-ops — this looks
 * like a corrupted/garbled delimiter value; confirm against the original
 * TestLineReader source before relying on this phase.
 */
@Test public void testCustomDelimiter() throws Exception {
Delimiter="";
String CurrentBufferTailToken="Gelesh";
String NextBufferHeadToken="id>Omathil ";
// Expected value: the tail+head tokens with any delimiter text removed.
String Expected=(CurrentBufferTailToken + NextBufferHeadToken).replace(Delimiter,"");
String TestPartOfInput=CurrentBufferTailToken + NextBufferHeadToken;
int BufferSize=64 * 1024;
// Pad with 'a' so the interesting tokens land exactly at the end of the
// reader's internal buffer.
int numberOfCharToFillTheBuffer=BufferSize - CurrentBufferTailToken.length();
StringBuilder fillerString=new StringBuilder();
for (int i=0; i < numberOfCharToFillTheBuffer; i++) {
fillerString.append('a');
}
TestData=fillerString + TestPartOfInput;
lineReader=new LineReader(new ByteArrayInputStream(TestData.getBytes()),Delimiter.getBytes());
line=new Text();
lineReader.readLine(line);
Assert.assertEquals(fillerString.toString(),line.toString());
lineReader.readLine(line);
Assert.assertEquals(Expected,line.toString());
// Second phase: a multi-character delimiter with leading, doubled, and
// partial occurrences ("ecord", "recor", "core" must not match "record").
Delimiter="record";
StringBuilder TestStringBuilder=new StringBuilder();
TestStringBuilder.append(Delimiter + "Kerala ");
TestStringBuilder.append(Delimiter + "Bangalore");
TestStringBuilder.append(Delimiter + " North Korea");
TestStringBuilder.append(Delimiter + Delimiter + "Guantanamo");
TestStringBuilder.append(Delimiter + "ecord" + "recor"+ "core");
TestData=TestStringBuilder.toString();
lineReader=new LineReader(new ByteArrayInputStream(TestData.getBytes()),Delimiter.getBytes());
// Leading delimiter yields an initial empty record.
lineReader.readLine(line);
Assert.assertEquals("",line.toString());
lineReader.readLine(line);
Assert.assertEquals("Kerala ",line.toString());
lineReader.readLine(line);
Assert.assertEquals("Bangalore",line.toString());
lineReader.readLine(line);
Assert.assertEquals(" North Korea",line.toString());
// Doubled delimiter yields an empty record between them.
lineReader.readLine(line);
Assert.assertEquals("",line.toString());
lineReader.readLine(line);
Assert.assertEquals("Guantanamo",line.toString());
// Partial delimiter fragments pass through unmodified.
lineReader.readLine(line);
Assert.assertEquals(("ecord" + "recor" + "core"),line.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * MachineList built from a mixed IP/hostname list: an IP that is listed
 * is included even when its reverse lookup yields an unlisted name, and
 * an unlisted IP remains excluded.
 */
@Test
public void testHostNames() throws UnknownHostException {
  InetAddress host1Addr = InetAddress.getByName("1.2.3.1");
  InetAddress host4Addr = InetAddress.getByName("1.2.3.4");
  // Mocked addresses control what reverse DNS appears to return.
  InetAddress mockedHost4 = Mockito.mock(InetAddress.class);
  InetAddress mockedHost5 = Mockito.mock(InetAddress.class);
  Mockito.when(mockedHost4.getCanonicalHostName()).thenReturn("differentName");
  Mockito.when(mockedHost5.getCanonicalHostName()).thenReturn("host5");
  MachineList.InetAddressFactory lookupFactory =
      Mockito.mock(MachineList.InetAddressFactory.class);
  Mockito.when(lookupFactory.getByName("1.2.3.4")).thenReturn(mockedHost4);
  Mockito.when(lookupFactory.getByName("1.2.3.5")).thenReturn(mockedHost5);
  Mockito.when(lookupFactory.getByName("host1")).thenReturn(host1Addr);
  Mockito.when(lookupFactory.getByName("host4")).thenReturn(host4Addr);
  MachineList machineList = new MachineList(
      StringUtils.getTrimmedStringCollection(HOST_LIST), lookupFactory);
  assertTrue(machineList.includes("1.2.3.4"));
  assertFalse(machineList.includes("1.2.3.5"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * MachineList resolution with only static hostname mappings: a listed
 * host's IP is included, an unlisted IP is excluded.
 */
@Test
public void testStaticIPHostNameList() throws UnknownHostException {
  InetAddress host1Addr = InetAddress.getByName("1.2.3.1");
  InetAddress host4Addr = InetAddress.getByName("1.2.3.4");
  MachineList.InetAddressFactory lookupFactory =
      Mockito.mock(MachineList.InetAddressFactory.class);
  Mockito.when(lookupFactory.getByName("host1")).thenReturn(host1Addr);
  Mockito.when(lookupFactory.getByName("host4")).thenReturn(host4Addr);
  MachineList machineList = new MachineList(
      StringUtils.getTrimmedStringCollection(HOST_LIST), lookupFactory);
  assertTrue(machineList.includes("1.2.3.4"));
  assertFalse(machineList.includes("1.2.3.5"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * MachineList where the queried IP reverse-resolves to a listed hostname
 * ("host4"): the IP must be included; an IP resolving to an unlisted
 * host must be excluded.
 */
@Test
public void testHostNamesReverserIpMatch() throws UnknownHostException {
  InetAddress host1Addr = InetAddress.getByName("1.2.3.1");
  InetAddress host4Addr = InetAddress.getByName("1.2.3.4");
  // Mocked reverse lookups: 1.2.3.4 -> listed "host4", 1.2.3.5 -> unlisted.
  InetAddress mockedHost4 = Mockito.mock(InetAddress.class);
  InetAddress mockedHost5 = Mockito.mock(InetAddress.class);
  Mockito.when(mockedHost4.getCanonicalHostName()).thenReturn("host4");
  Mockito.when(mockedHost5.getCanonicalHostName()).thenReturn("host5");
  MachineList.InetAddressFactory lookupFactory =
      Mockito.mock(MachineList.InetAddressFactory.class);
  Mockito.when(lookupFactory.getByName("1.2.3.4")).thenReturn(mockedHost4);
  Mockito.when(lookupFactory.getByName("1.2.3.5")).thenReturn(mockedHost5);
  Mockito.when(lookupFactory.getByName("host1")).thenReturn(host1Addr);
  Mockito.when(lookupFactory.getByName("host4")).thenReturn(host4Addr);
  MachineList machineList = new MachineList(
      StringUtils.getTrimmedStringCollection(HOST_LIST), lookupFactory);
  assertTrue(machineList.includes("1.2.3.4"));
  assertFalse(machineList.includes("1.2.3.5"));
}
APIUtilityVerifier BooleanVerifier
/**
 * The client id placed in an RPC request header must round-trip intact.
 */
@Test
public void testRpcClientId() {
  byte[] uuid = ClientId.getClientId();
  RpcRequestHeaderProto header = ProtoUtil.makeRpcRequestHeader(
      RpcKind.RPC_PROTOCOL_BUFFER, OperationProto.RPC_FINAL_PACKET, 0,
      RpcConstants.INVALID_RETRY_COUNT, uuid);
  // assertArrayEquals reports the first differing index on failure,
  // unlike assertTrue(Arrays.equals(...)).
  assertArrayEquals(uuid, header.getClientId().toByteArray());
}
APIUtilityVerifier IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Loads the same class through many throwaway classloaders via
 * ReflectionUtils.newInstance and checks that the internal constructor
 * cache does not retain an entry per loader.
 *
 * NOTE(review): the final assertion relies on System.gc() having
 * actually collected the dead loaders; the JVM does not guarantee this,
 * so the test is inherently best-effort and may be flaky under some
 * collectors — confirm tolerance before tightening.
 */
@SuppressWarnings("unchecked") @Test public void testCacheDoesntLeak() throws Exception {
int iterations=9999;
for (int i=0; i < iterations; i++) {
// A fresh loader each iteration produces a distinct Class object for
// the same class name.
URLClassLoader loader=new URLClassLoader(new URL[0],getClass().getClassLoader());
Class cl=Class.forName("org.apache.hadoop.util.TestReflectionUtils$LoadedInChild",false,loader);
Object o=ReflectionUtils.newInstance(cl,null);
assertEquals(cl,o.getClass());
}
System.gc();
// After GC, the cache must hold fewer entries than iterations, i.e. it
// did not pin every per-loader Class.
assertTrue(cacheSize() + " too big",cacheSize() < iterations);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * shutdownThread must stop the sample thread and its return value must
 * agree with whether the thread actually terminated.
 */
@Test(timeout=3000)
public void testShutdownThread() {
  Thread worker = new Thread(sampleRunnable);
  worker.start();
  boolean reported = ShutdownThreadsHelper.shutdownThread(worker);
  boolean terminated = !worker.isAlive();
  // The helper's report must match the observed thread state.
  assertEquals("Incorrect return value", reported, terminated);
  assertTrue("Thread is not shutdown", terminated);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * shutdownExecutorService must stop the executor and its return value
 * must agree with whether the executor actually terminated.
 */
@Test
public void testShutdownThreadPool() throws InterruptedException {
  ScheduledThreadPoolExecutor pool = new ScheduledThreadPoolExecutor(1);
  pool.execute(sampleRunnable);
  boolean reported = ShutdownThreadsHelper.shutdownExecutorService(pool);
  boolean terminated = pool.isTerminated();
  // The helper's report must match the observed executor state.
  assertEquals("Incorrect return value", reported, terminated);
  assertTrue("ExecutorService is not shutdown", terminated);
}
APIUtilityVerifier EqualityVerifier
/**
 * getTrimmedStrings must split comma-separated directory lists, trim
 * surrounding whitespace, and drop empty entries — including for
 * pathological spacing and trailing commas.
 */
@Test(timeout=30000)
public void testGetTrimmedStrings() throws Exception {
  String compact = "/spindle1/hdfs,/spindle2/hdfs,/spindle3/hdfs";
  String spaced = "/spindle1/hdfs, /spindle2/hdfs, /spindle3/hdfs";
  String messy1 = " /spindle1/hdfs , /spindle2/hdfs ,/spindle3/hdfs ";
  String messy2 = " /spindle1/hdfs , /spindle2/hdfs ,/spindle3/hdfs , ";
  String empty = "";
  String blank = " ";
  String[] expected = {"/spindle1/hdfs", "/spindle2/hdfs", "/spindle3/hdfs"};
  String[] none = {};
  // Every spacing variant normalizes to the same three entries.
  assertArrayEquals(expected, StringUtils.getTrimmedStrings(compact));
  assertArrayEquals(expected, StringUtils.getTrimmedStrings(spaced));
  assertArrayEquals(expected, StringUtils.getTrimmedStrings(messy1));
  assertArrayEquals(expected, StringUtils.getTrimmedStrings(messy2));
  // Empty and whitespace-only inputs yield an empty array.
  assertArrayEquals(none, StringUtils.getTrimmedStrings(empty));
  assertArrayEquals(none, StringUtils.getTrimmedStrings(blank));
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * getTrimmedStringCollection must trim entries, drop empties, and
 * de-duplicate, leaving the four unique tokens.
 */
@Test
public void testGetUniqueNonEmptyTrimmedStrings() {
  final String TO_SPLIT = ",foo, bar,baz,,blah,blah,bar,";
  Collection<String> col = StringUtils.getTrimmedStringCollection(TO_SPLIT);
  assertEquals(4, col.size());
  // Idiom: varargs asList instead of the explicit String[] allocation.
  assertTrue(col.containsAll(Arrays.asList("foo", "bar", "baz", "blah")));
}
APIUtilityVerifier EqualityVerifier
/**
 * Exercises StringUtils.split over null, empty, plain, comma-separated and
 * backslash-escaped inputs. The NULL_STR/EMPTY_STR/STR_* fixtures are
 * constants declared elsewhere in this test class.
 */
@Test(timeout=30000) public void testSplit() throws Exception {
// Splitting null returns null.
assertEquals(NULL_STR,StringUtils.split(NULL_STR));
// Empty input and separator-only input both produce zero tokens.
String[] splits=StringUtils.split(EMPTY_STR);
assertEquals(0,splits.length);
splits=StringUtils.split(",,");
assertEquals(0,splits.length);
// A string with no separators comes back as a single token.
splits=StringUtils.split(STR_WO_SPECIAL_CHARS);
assertEquals(1,splits.length);
assertEquals(STR_WO_SPECIAL_CHARS,splits[0]);
splits=StringUtils.split(STR_WITH_COMMA);
assertEquals(2,splits.length);
assertEquals("A",splits[0]);
assertEquals("B",splits[1]);
// Escaped commas must not split.
splits=StringUtils.split(ESCAPED_STR_WITH_COMMA);
assertEquals(1,splits.length);
assertEquals(ESCAPED_STR_WITH_COMMA,splits[0]);
splits=StringUtils.split(STR_WITH_ESCAPE);
assertEquals(1,splits.length);
assertEquals(STR_WITH_ESCAPE,splits[0]);
// Mixed escaped and unescaped separators: only unescaped commas split.
splits=StringUtils.split(STR_WITH_BOTH2);
assertEquals(3,splits.length);
assertEquals(EMPTY_STR,splits[0]);
assertEquals("A\\,",splits[1]);
assertEquals("B\\\\",splits[2]);
splits=StringUtils.split(ESCAPED_STR_WITH_BOTH2);
assertEquals(1,splits.length);
assertEquals(ESCAPED_STR_WITH_BOTH2,splits[0]);
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Windows-only: verifies "winutils ls" output for a freshly written file,
 * both in the default and the "-F" (pipe-separated fields) format.
 */
@Test(timeout=30000) public void testLs() throws IOException {
  final String content="6bytes";
  final int contentSize=content.length();
  File testFile=new File(TEST_DIR,"file1");
  writeFile(testFile,content);
  String output=Shell.execCommand(Shell.WINUTILS,"ls",testFile.getCanonicalPath());
  String[] outputArgs=output.split("[ \r\n]");
  // assertEquals reports both values on mismatch, unlike assertTrue(equals).
  assertEquals("-rwx------",outputArgs[0]);
  assertEquals(testFile.getCanonicalPath(),outputArgs[outputArgs.length - 1]);
  // "-F" output: 9 pipe-separated fields; field 4 is the file size.
  output=Shell.execCommand(Shell.WINUTILS,"ls","-F",testFile.getCanonicalPath());
  outputArgs=output.split("[|\r\n]");
  assertEquals(9,outputArgs.length);
  assertEquals("-rwx------",outputArgs[0]);
  assertEquals(contentSize,Long.parseLong(outputArgs[4]));
  assertEquals(testFile.getCanonicalPath(),outputArgs[8]);
  testFile.delete();
  assertFalse(testFile.exists());
}
APIUtilityVerifier EqualityVerifier
/**
 * Windows-only: "winutils groups" with no argument must report the current
 * user's groups, and the "-F" format differs only by '|' separators.
 */
@Test(timeout=30000) public void testGroups() throws IOException {
  String me=System.getProperty("user.name");
  String noArgGroups=Shell.execCommand(Shell.WINUTILS,"groups").trim();
  String myGroups=Shell.execCommand(Shell.WINUTILS,"groups",me).trim();
  assertEquals(myGroups,noArgGroups);
  // The formatted variant uses '|' where the plain variant uses spaces.
  String formatted=Shell.execCommand(Shell.WINUTILS,"groups","-F",me).trim();
  formatted=formatted.replace("|"," ");
  assertEquals(myGroups,formatted);
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Windows-only: "winutils symlink" must reject a link path that contains
 * forward slashes (only backslashes are accepted in the link argument).
 */
@Test(timeout=30000) public void testSymlinkRejectsForwardSlashesInLink() throws IOException {
  File newFile=new File(TEST_DIR,"file");
  assertTrue(newFile.createNewFile());
  String target=newFile.getPath();
  // Deliberately convert the link path to forward slashes.
  String link=new File(TEST_DIR,"link").getPath().replaceAll("\\\\","/");
  try {
    Shell.execCommand(Shell.WINUTILS,"symlink",link,target);
    fail(String.format("did not receive expected failure creating symlink " + "with forward slashes in link: link = %s, target = %s",link,target));
  }
  catch ( IOException e) {
    // Fixed copy-paste: this test is about slashes in the link, not the target.
    LOG.info("Expected: Failed to create symlink with forward slashes in link");
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Validate behavior of chmod commands on directories on Windows: listing a
 * directory requires read permission, creating or renaming children requires
 * write permission, while deleting a child succeeds regardless (a documented
 * Windows quirk).
 */
@Test(timeout=30000) public void testBasicChmodOnDir() throws IOException {
  File a=new File(TEST_DIR,"a");
  File b=new File(a,"b");
  a.mkdirs();
  assertTrue(b.createNewFile());
  // No read permission: listing must fail, signalled by a null array.
  chmod("300",a);
  String[] files=a.list();
  assertTrue("Listing a directory without read permission should fail",null == files);
  chmod("700",a);
  files=a.list();
  assertEquals("b",files[0]);
  // Read+execute only (no write): creating a child file must fail.
  chmod("500",a);
  File c=new File(a,"c");
  try {
    c.createNewFile();
    // fail() expresses the intent directly instead of assertFalse(msg, true).
    fail("writeFile should have failed!");
  }
  catch ( IOException ex) {
    // Message fixed to match the actual mode set above (was "577").
    LOG.info("Expected: Failed to create a file when directory " + "permissions are 500");
  }
  assertTrue("Special behavior: deleting a file will succeed on Windows " + "even if a user does not have write permissions on the parent dir",b.delete());
  assertFalse("Renaming a file should fail on the dir where a user does " + "not have write permissions",b.renameTo(new File(a,"d")));
  // Restore write permission: create and rename now succeed.
  chmod("700",a);
  assertTrue(c.createNewFile());
  File d=new File(a,"d");
  assertTrue(c.renameTo(d));
  // Read+write without execute still allows listing on Windows.
  chmod("600",a);
  files=a.list();
  assertEquals("d",files[0]);
  assertTrue(d.delete());
  File e=new File(a,"e");
  assertTrue(e.createNewFile());
  assertTrue(e.renameTo(new File(a,"f")));
  chmod("700",a);
}
APIUtilityVerifier UtilityVerifier BooleanVerifier HybridVerifier
/**
 * Windows-only: "winutils symlink" must reject a target path that contains
 * forward slashes (only backslashes are accepted).
 */
@Test(timeout=30000) public void testSymlinkRejectsForwardSlashesInTarget() throws IOException {
File newFile=new File(TEST_DIR,"file");
assertTrue(newFile.createNewFile());
// Deliberately convert the target path to forward slashes.
String target=newFile.getPath().replaceAll("\\\\","/");
String link=new File(TEST_DIR,"link").getPath();
try {
Shell.execCommand(Shell.WINUTILS,"symlink",link,target);
fail(String.format("did not receive expected failure creating symlink " + "with forward slashes in target: link = %s, target = %s",link,target));
}
catch ( IOException e) {
// Expected path: winutils rejects the malformed target.
LOG.info("Expected: Failed to create symlink with forward slashes in target");
}
}
APIUtilityVerifier UtilityVerifier BooleanVerifier ConditionMatcher HybridVerifier
/**
 * Windows-only: "winutils readlink" prints the target of a directory or file
 * symlink, and exits with code 1 for anything that is not a symlink or for a
 * malformed invocation.
 */
@Test(timeout=30000) public void testReadLink() throws IOException {
  File dir1=new File(TEST_DIR,"dir1");
  assertTrue(dir1.mkdirs());
  File file1=new File(dir1,"file1.txt");
  assertTrue(file1.createNewFile());
  File dirLink=new File(TEST_DIR,"dlink");
  File fileLink=new File(TEST_DIR,"flink");
  Shell.execCommand(Shell.WINUTILS,"symlink",dirLink.toString(),dir1.toString());
  Shell.execCommand(Shell.WINUTILS,"symlink",fileLink.toString(),file1.toString());
  // readlink resolves both directory and file symlinks to their targets.
  String readLinkOutput=Shell.execCommand(Shell.WINUTILS,"readlink",dirLink.toString());
  assertThat(readLinkOutput,equalTo(dir1.toString()));
  readLinkOutput=Shell.execCommand(Shell.WINUTILS,"readlink",fileLink.toString());
  assertThat(readLinkOutput,equalTo(file1.toString()));
  // Non-links and bad usage all exit with code 1 (helper replaces five
  // copy-pasted try/fail/catch stanzas).
  assertReadlinkFails("Failed to get Shell.ExitCodeException when reading bad symlink","");
  assertReadlinkFails("Failed to get Shell.ExitCodeException when reading bad symlink","ThereIsNoSuchLink");
  assertReadlinkFails("Failed to get Shell.ExitCodeException when reading bad symlink",dir1.toString());
  assertReadlinkFails("Failed to get Shell.ExitCodeException when reading bad symlink",file1.toString());
  assertReadlinkFails("Failed to get Shell.ExitCodeException with bad parameters","a","b");
}
/** Runs "winutils readlink" with the given arguments and asserts exit code 1. */
private void assertReadlinkFails(String failureMessage,String... readlinkArgs) throws IOException {
  String[] cmd=new String[readlinkArgs.length + 2];
  cmd[0]=Shell.WINUTILS;
  cmd[1]="readlink";
  System.arraycopy(readlinkArgs,0,cmd,2,readlinkArgs.length);
  try {
    Shell.execCommand(cmd);
    fail(failureMessage);
  }
  catch ( Shell.ExitCodeException ece) {
    assertThat(ece.getExitCode(),is(1));
  }
}
APIUtilityVerifier EqualityVerifier
/** Removing CREATE from the full permission mask must clear the CREATE bit. */
@Test public void testRemoveSpecificPerms(){
  final int original=Perms.ALL;
  final int toRemove=Perms.CREATE;
  final int result=ZKUtil.removeSpecificPerms(original,toRemove);
  // The CREATE bit must be absent from the returned mask.
  assertEquals("Removal failed",0,result & Perms.CREATE);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * ZKUtil.parseAuth must split a comma/whitespace separated auth string into
 * scheme/auth pairs, tolerating surrounding whitespace and newlines.
 */
@Test public void testGoodAuths(){
  // Generic element type restored: raw List made the get(0) assignment below
  // a compile error (raw get() returns Object).
  List<ZKAuthInfo> result=ZKUtil.parseAuth("scheme:data,\n scheme2:user:pass");
  assertEquals(2,result.size());
  ZKAuthInfo auth0=result.get(0);
  assertEquals("scheme",auth0.getScheme());
  assertEquals("data",new String(auth0.getAuth()));
  ZKAuthInfo auth1=result.get(1);
  assertEquals("scheme2",auth1.getScheme());
  // Only the first ':' separates scheme from auth; later colons are payload.
  assertEquals("user:pass",new String(auth1.getAuth()));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * ZKUtil.parseACLs must turn a comma-separated ACL spec into ZooKeeper ACL
 * entries with the correct scheme, id and permission bits.
 */
@Test public void testGoodACLs(){
  // Generic element type restored: raw List made the get(0) assignments below
  // compile errors (raw get() returns Object).
  List<ACL> result=ZKUtil.parseACLs("sasl:hdfs/host1@MY.DOMAIN:cdrwa, sasl:hdfs/host2@MY.DOMAIN:ca");
  ACL acl0=result.get(0);
  // "cdrwa" expands to all five permission bits.
  assertEquals(Perms.CREATE | Perms.DELETE | Perms.READ| Perms.WRITE| Perms.ADMIN,acl0.getPerms());
  assertEquals("sasl",acl0.getId().getScheme());
  assertEquals("hdfs/host1@MY.DOMAIN",acl0.getId().getId());
  ACL acl1=result.get(1);
  // "ca" expands to CREATE and ADMIN only.
  assertEquals(Perms.CREATE | Perms.ADMIN,acl1.getPerms());
  assertEquals("sasl",acl1.getId().getScheme());
  assertEquals("hdfs/host2@MY.DOMAIN",acl1.getId().getId());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Covers Hash.parseHashType, Hash.getInstance (by configuration and by id),
 * and determinism of repeated Jenkins/Murmur hashing over the same input.
 * LINE is a fixture string declared elsewhere in this test class.
 */
@Test public void testHash(){
  int iterations=30;
  // parseHashType maps names to ids; unknown names yield INVALID_HASH.
  // assertEquals/assertSame report both values on failure, unlike assertTrue(==).
  assertEquals("testHash jenkins error !!!",Hash.JENKINS_HASH,Hash.parseHashType("jenkins"));
  assertEquals("testHash murmur error !!!",Hash.MURMUR_HASH,Hash.parseHashType("murmur"));
  assertEquals("testHash undefined",Hash.INVALID_HASH,Hash.parseHashType("undefined"));
  // getInstance(conf) honors hadoop.util.hash.type and defaults to murmur.
  Configuration cfg=new Configuration();
  cfg.set("hadoop.util.hash.type","murmur");
  assertSame("testHash",MurmurHash.getInstance(),Hash.getInstance(cfg));
  cfg=new Configuration();
  cfg.set("hadoop.util.hash.type","jenkins");
  assertSame("testHash jenkins configuration error !!!",JenkinsHash.getInstance(),Hash.getInstance(cfg));
  cfg=new Configuration();
  assertSame("testHash undefine configuration error !!!",MurmurHash.getInstance(),Hash.getInstance(cfg));
  assertSame("testHash error jenkin getInstance !!!",JenkinsHash.getInstance(),Hash.getInstance(Hash.JENKINS_HASH));
  assertSame("testHash error murmur getInstance !!!",MurmurHash.getInstance(),Hash.getInstance(Hash.MURMUR_HASH));
  assertNull("testHash error invalid getInstance !!!",Hash.getInstance(Hash.INVALID_HASH));
  // Hoist the loop-invariant byte conversion; hashing must be deterministic.
  final byte[] bytes=LINE.getBytes();
  int murmurHash=Hash.getInstance(Hash.MURMUR_HASH).hash(bytes);
  for (int i=0; i < iterations; i++) {
    assertEquals("multiple evaluation murmur hash error !!!",murmurHash,Hash.getInstance(Hash.MURMUR_HASH).hash(bytes));
  }
  murmurHash=Hash.getInstance(Hash.MURMUR_HASH).hash(bytes,67);
  for (int i=0; i < iterations; i++) {
    assertEquals("multiple evaluation murmur hash error !!!",murmurHash,Hash.getInstance(Hash.MURMUR_HASH).hash(bytes,67));
  }
  int jenkinsHash=Hash.getInstance(Hash.JENKINS_HASH).hash(bytes);
  for (int i=0; i < iterations; i++) {
    assertEquals("multiple evaluation jenkins hash error !!!",jenkinsHash,Hash.getInstance(Hash.JENKINS_HASH).hash(bytes));
  }
  jenkinsHash=Hash.getInstance(Hash.JENKINS_HASH).hash(bytes,67);
  for (int i=0; i < iterations; i++) {
    assertEquals("multiple evaluation jenkins hash error !!!",jenkinsHash,Hash.getInstance(Hash.JENKINS_HASH).hash(bytes,67));
  }
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
/**
 * RpcFactoryProvider must return the default PB factories, throw
 * YarnRuntimeException for unknown or unloadable factory class names, and
 * succeed for a valid explicitly configured factory class.
 */
@Test public void testFactoryProvider(){
Configuration conf=new Configuration();
RpcClientFactory clientFactory=null;
RpcServerFactory serverFactory=null;
// Defaults: the protobuf implementations.
clientFactory=RpcFactoryProvider.getClientFactory(conf);
serverFactory=RpcFactoryProvider.getServerFactory(conf);
Assert.assertEquals(RpcClientFactoryPBImpl.class,clientFactory.getClass());
Assert.assertEquals(RpcServerFactoryPBImpl.class,serverFactory.getClass());
// An unknown serializer name must be rejected for both factories.
conf.set(YarnConfiguration.IPC_CLIENT_FACTORY_CLASS,"unknown");
conf.set(YarnConfiguration.IPC_SERVER_FACTORY_CLASS,"unknown");
conf.set(YarnConfiguration.IPC_RECORD_FACTORY_CLASS,"unknown");
try {
clientFactory=RpcFactoryProvider.getClientFactory(conf);
Assert.fail("Expected an exception - unknown serializer");
}
catch ( YarnRuntimeException e) {
// expected: unknown client factory class name
}
try {
serverFactory=RpcFactoryProvider.getServerFactory(conf);
Assert.fail("Expected an exception - unknown serializer");
}
catch ( YarnRuntimeException e) {
// expected: unknown server factory class name
}
// A nonexistent client class fails; a valid server class must load fine.
conf=new Configuration();
conf.set(YarnConfiguration.IPC_CLIENT_FACTORY_CLASS,"NonExistantClass");
conf.set(YarnConfiguration.IPC_SERVER_FACTORY_CLASS,RpcServerFactoryPBImpl.class.getName());
try {
clientFactory=RpcFactoryProvider.getClientFactory(conf);
Assert.fail("Expected an exception - unknown class");
}
catch ( YarnRuntimeException e) {
// expected: class not on the classpath
}
try {
serverFactory=RpcFactoryProvider.getServerFactory(conf);
}
catch ( YarnRuntimeException e) {
Assert.fail("Error while loading factory using reflection: [" + RpcServerFactoryPBImpl.class.getName() + "]");
}
}
APIUtilityVerifier IdentityVerifier
/**
 * Throws {@code Error} inside a thread and checks that the registered
 * {@code YarnUncaughtExceptionHandler} spy receives it.
 *
 * Uses {@code ExitUtil} to avoid a real JVM exit through
 * {@code System.exit(-1)} inside the handler.
 * @throws InterruptedException if interrupted while joining the thread
 */
@Test public void testUncaughtExceptionHandlerWithError() throws InterruptedException {
// Prevent the handler's System.exit path from terminating the test JVM.
ExitUtil.disableSystemExit();
final YarnUncaughtExceptionHandler spyErrorHandler=spy(exHandler);
final java.lang.Error error=new java.lang.Error("test-error");
final Thread errorThread=new Thread(new Runnable(){
@Override public void run(){
throw error;
}
}
);
errorThread.setUncaughtExceptionHandler(spyErrorHandler);
assertSame(spyErrorHandler,errorThread.getUncaughtExceptionHandler());
errorThread.start();
// join() guarantees the uncaught-exception callback has run before verify().
errorThread.join();
verify(spyErrorHandler).uncaughtException(errorThread,error);
}
APIUtilityVerifier IdentityVerifier
/**
 * Throws {@code YarnRuntimeException} inside a thread and checks that the
 * registered {@code YarnUncaughtExceptionHandler} spy receives it.
 * @throws InterruptedException if interrupted while joining the thread
 */
@Test public void testUncaughtExceptionHandlerWithRuntimeException() throws InterruptedException {
  final YarnUncaughtExceptionHandler handlerSpy=spy(exHandler);
  final YarnRuntimeException thrown=new YarnRuntimeException("test-yarn-runtime-exception");
  final Thread thrower=new Thread(new Runnable(){
    @Override public void run(){
      throw thrown;
    }
  });
  thrower.setUncaughtExceptionHandler(handlerSpy);
  assertSame(handlerSpy,thrower.getUncaughtExceptionHandler());
  thrower.start();
  // join() guarantees the callback has fired before Mockito verification.
  thrower.join();
  verify(handlerSpy).uncaughtException(thrower,thrown);
}
APIUtilityVerifier IdentityVerifier
/**
 * Throws {@code OutOfMemoryError} inside a thread and checks that the
 * registered {@code YarnUncaughtExceptionHandler} spy receives it.
 *
 * Uses {@code ExitUtil} to avoid a real JVM halt through
 * {@code Runtime.getRuntime().halt(-1)} inside the handler.
 * @throws InterruptedException if interrupted while joining the thread
 */
@Test public void testUncaughtExceptionHandlerWithOutOfMemoryError() throws InterruptedException {
// Prevent the handler's halt path from terminating the test JVM.
ExitUtil.disableSystemHalt();
final YarnUncaughtExceptionHandler spyOomHandler=spy(exHandler);
final OutOfMemoryError oomError=new OutOfMemoryError("out-of-memory-error");
final Thread oomThread=new Thread(new Runnable(){
@Override public void run(){
throw oomError;
}
}
);
oomThread.setUncaughtExceptionHandler(spyOomHandler);
assertSame(spyOomHandler,oomThread.getUncaughtExceptionHandler());
oomThread.start();
// join() guarantees the uncaught-exception callback has run before verify().
oomThread.join();
verify(spyOomHandler).uncaughtException(oomThread,oomError);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips an AllocateRequest carrying container-resource-increase
 * requests through its protobuf form and verifies the fields survive.
 * (Method name keeps a historical typo, "Allcoate", to avoid changing the
 * externally visible test name.)
 */
@Test public void testAllcoateRequestWithIncrease(){
  // Generic element type restored: raw List made get(i).getCapability() below
  // a compile error.
  List<ContainerResourceIncreaseRequest> incRequests=new ArrayList<ContainerResourceIncreaseRequest>();
  for (int i=0; i < 3; i++) {
    incRequests.add(ContainerResourceIncreaseRequest.newInstance(null,Resource.newInstance(0,i)));
  }
  AllocateRequest r=AllocateRequest.newInstance(123,0f,null,null,null,incRequests);
  // Serialize to proto and rebuild to prove nothing is lost in translation.
  AllocateRequestProto p=((AllocateRequestPBImpl)r).getProto();
  r=new AllocateRequestPBImpl(p);
  Assert.assertEquals(123,r.getResponseId());
  Assert.assertEquals(incRequests.size(),r.getIncreaseRequests().size());
  for (int i=0; i < incRequests.size(); i++) {
    // JUnit convention: expected value first, actual second.
    Assert.assertEquals(incRequests.get(i).getCapability().getVirtualCores(),r.getIncreaseRequests().get(i).getCapability().getVirtualCores());
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips an AllocateResponse carrying increased (3) and decreased (5)
 * container lists through its protobuf form and verifies list sizes and the
 * per-entry virtual-core values.
 */
@Test public void testAllocateResponseWithIncDecContainers(){
  // Generic element types restored on the lists built here (raw before).
  List<ContainerResourceIncrease> incContainers=new ArrayList<ContainerResourceIncrease>();
  List<ContainerResourceDecrease> decContainers=new ArrayList<ContainerResourceDecrease>();
  for (int i=0; i < 3; i++) {
    incContainers.add(ContainerResourceIncrease.newInstance(null,Resource.newInstance(1024,i),null));
  }
  for (int i=0; i < 5; i++) {
    decContainers.add(ContainerResourceDecrease.newInstance(null,Resource.newInstance(1024,i)));
  }
  AllocateResponse r=AllocateResponse.newInstance(3,new ArrayList(),new ArrayList(),new ArrayList(),null,AMCommand.AM_RESYNC,3,null,new ArrayList(),incContainers,decContainers);
  // Serialize to proto and rebuild to prove nothing is lost in translation.
  AllocateResponseProto p=((AllocateResponsePBImpl)r).getProto();
  r=new AllocateResponsePBImpl(p);
  Assert.assertEquals(incContainers.size(),r.getIncreasedContainers().size());
  Assert.assertEquals(decContainers.size(),r.getDecreasedContainers().size());
  // vcores were set to the loop index, so the round-trip must preserve i.
  for (int i=0; i < incContainers.size(); i++) {
    Assert.assertEquals(i,r.getIncreasedContainers().get(i).getCapability().getVirtualCores());
  }
  for (int i=0; i < decContainers.size(); i++) {
    Assert.assertEquals(i,r.getDecreasedContainers().get(i).getCapability().getVirtualCores());
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * ApplicationAttemptId value semantics: equals/compareTo/hashCode agree, and
 * toString pads the app id to 4 digits and the attempt id to 6 digits.
 */
@Test public void testApplicationAttemptId(){
// a1 and a5 are built from identical (timestamp, appId, attemptId) triples.
ApplicationAttemptId a1=createAppAttemptId(10l,1,1);
ApplicationAttemptId a2=createAppAttemptId(10l,1,2);
ApplicationAttemptId a3=createAppAttemptId(10l,2,1);
ApplicationAttemptId a4=createAppAttemptId(8l,1,4);
ApplicationAttemptId a5=createAppAttemptId(10l,1,1);
Assert.assertTrue(a1.equals(a5));
Assert.assertFalse(a1.equals(a2));
Assert.assertFalse(a1.equals(a3));
Assert.assertFalse(a1.equals(a4));
// Ordering: timestamp first, then app id, then attempt id.
Assert.assertTrue(a1.compareTo(a5) == 0);
Assert.assertTrue(a1.compareTo(a2) < 0);
Assert.assertTrue(a1.compareTo(a3) < 0);
Assert.assertTrue(a1.compareTo(a4) > 0);
Assert.assertTrue(a1.hashCode() == a5.hashCode());
Assert.assertFalse(a1.hashCode() == a2.hashCode());
Assert.assertFalse(a1.hashCode() == a3.hashCode());
Assert.assertFalse(a1.hashCode() == a4.hashCode());
long ts=System.currentTimeMillis();
ApplicationAttemptId a6=createAppAttemptId(ts,543627,33492611);
// Small ids are zero-padded; large ids are printed in full.
Assert.assertEquals("appattempt_10_0001_000001",a1.toString());
Assert.assertEquals("appattempt_" + ts + "_543627_33492611",a6.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * ApplicationId value semantics: equals/compareTo/hashCode agree, and
 * toString pads small ids to 4 digits while printing large ids in full.
 */
@Test public void testApplicationId(){
// a1 and a3 are built from the identical (timestamp, id) pair.
ApplicationId a1=ApplicationId.newInstance(10l,1);
ApplicationId a2=ApplicationId.newInstance(10l,2);
ApplicationId a3=ApplicationId.newInstance(10l,1);
ApplicationId a4=ApplicationId.newInstance(8l,3);
Assert.assertFalse(a1.equals(a2));
Assert.assertFalse(a1.equals(a4));
Assert.assertTrue(a1.equals(a3));
// Ordering: cluster timestamp first, then id.
Assert.assertTrue(a1.compareTo(a2) < 0);
Assert.assertTrue(a1.compareTo(a3) == 0);
Assert.assertTrue(a1.compareTo(a4) > 0);
Assert.assertTrue(a1.hashCode() == a3.hashCode());
Assert.assertFalse(a1.hashCode() == a2.hashCode());
Assert.assertFalse(a2.hashCode() == a4.hashCode());
long ts=System.currentTimeMillis();
ApplicationId a5=ApplicationId.newInstance(ts,45436343);
Assert.assertEquals("application_10_0001",a1.toString());
Assert.assertEquals("application_" + ts + "_45436343",a5.toString());
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * ApplicationReport value semantics: identically-built reports compare equal,
 * nulling a field breaks equality, and the AMRMToken defaults to null.
 */
@Test public void testApplicationReport(){
  long timestamp=System.currentTimeMillis();
  ApplicationReport appReport1=createApplicationReport(1,1,timestamp);
  ApplicationReport appReport2=createApplicationReport(1,1,timestamp);
  ApplicationReport appReport3=createApplicationReport(1,1,timestamp);
  Assert.assertEquals(appReport1,appReport2);
  Assert.assertEquals(appReport2,appReport3);
  appReport1.setApplicationId(null);
  Assert.assertNull(appReport1.getApplicationId());
  // Bug fix: assertNotSame only checked reference identity, which is trivially
  // true for two separately created reports. The intent is value inequality
  // after the field was nulled.
  Assert.assertFalse(appReport1.equals(appReport2));
  appReport2.setCurrentApplicationAttemptId(null);
  Assert.assertNull(appReport2.getCurrentApplicationAttemptId());
  Assert.assertFalse(appReport2.equals(appReport3));
  Assert.assertNull(appReport1.getAMRMToken());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * ContainerId value semantics: equals/compareTo/hashCode agree, and toString
 * pads app id, attempt id and container id fields for small values.
 */
@Test public void testContainerId(){
// c1 and c3 are built from identical (appId, attemptId, timestamp, id) values.
ContainerId c1=newContainerId(1,1,10l,1);
ContainerId c2=newContainerId(1,1,10l,2);
ContainerId c3=newContainerId(1,1,10l,1);
ContainerId c4=newContainerId(1,3,10l,1);
ContainerId c5=newContainerId(1,3,8l,1);
Assert.assertTrue(c1.equals(c3));
Assert.assertFalse(c1.equals(c2));
Assert.assertFalse(c1.equals(c4));
Assert.assertFalse(c1.equals(c5));
// Ordering: application/attempt first, then container id.
Assert.assertTrue(c1.compareTo(c3) == 0);
Assert.assertTrue(c1.compareTo(c2) < 0);
Assert.assertTrue(c1.compareTo(c4) < 0);
Assert.assertTrue(c1.compareTo(c5) > 0);
Assert.assertTrue(c1.hashCode() == c3.hashCode());
Assert.assertFalse(c1.hashCode() == c2.hashCode());
Assert.assertFalse(c1.hashCode() == c4.hashCode());
Assert.assertFalse(c1.hashCode() == c5.hashCode());
long ts=System.currentTimeMillis();
ContainerId c6=newContainerId(36473,4365472,ts,25645811);
// Small ids are zero-padded; large ids are printed in full.
Assert.assertEquals("container_10_0001_01_000001",c1.toString());
Assert.assertEquals("container_" + ts + "_36473_4365472_25645811",c6.toString());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips a ContainerResourceDecrease through its protobuf form and
 * verifies the capability and container id survive.
 */
@Test public void testResourceDecreaseContext(){
  ContainerId containerId=ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(1234,3),3),7);
  Resource resource=Resource.newInstance(1023,3);
  ContainerResourceDecrease ctx=ContainerResourceDecrease.newInstance(containerId,resource);
  // Serialize to proto and rebuild to prove nothing is lost in translation.
  ContainerResourceDecreaseProto proto=((ContainerResourceDecreasePBImpl)ctx).getProto();
  ctx=new ContainerResourceDecreasePBImpl(proto);
  // JUnit convention: expected value first, actual second.
  Assert.assertEquals(resource,ctx.getCapability());
  Assert.assertEquals(containerId,ctx.getContainerId());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips a ContainerResourceIncrease (including its container token)
 * through its protobuf form and verifies all fields survive.
 */
@Test public void testResourceIncreaseContext(){
  byte[] identifier=new byte[]{1,2,3,4};
  Token token=Token.newInstance(identifier,"","".getBytes(),"");
  ContainerId containerId=ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(1234,3),3),7);
  Resource resource=Resource.newInstance(1023,3);
  ContainerResourceIncrease ctx=ContainerResourceIncrease.newInstance(containerId,resource,token);
  // Serialize to proto and rebuild to prove nothing is lost in translation.
  ContainerResourceIncreaseProto proto=((ContainerResourceIncreasePBImpl)ctx).getProto();
  ctx=new ContainerResourceIncreasePBImpl(proto);
  // JUnit convention: expected value first, actual second.
  Assert.assertEquals(resource,ctx.getCapability());
  Assert.assertEquals(containerId,ctx.getContainerId());
  // assertArrayEquals reports the differing index on failure, unlike
  // assertTrue(Arrays.equals(...)).
  Assert.assertArrayEquals(identifier,ctx.getContainerToken().getIdentifier().array());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips a ContainerResourceIncreaseRequest through its protobuf form
 * and verifies the container id and capability survive.
 *
 * NOTE(review): the method name does not follow the lowerCamelCase
 * "testXxx" convention used elsewhere in this class; left unchanged to keep
 * the externally visible test name stable.
 */
@Test public void ContainerResourceIncreaseRequest(){
ContainerId containerId=ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(1234,3),3),7);
Resource resource=Resource.newInstance(1023,3);
ContainerResourceIncreaseRequest context=ContainerResourceIncreaseRequest.newInstance(containerId,resource);
// Serialize to proto and rebuild to prove nothing is lost in translation.
ContainerResourceIncreaseRequestProto proto=((ContainerResourceIncreaseRequestPBImpl)context).getProto();
ContainerResourceIncreaseRequest contextRecover=new ContainerResourceIncreaseRequestPBImpl(proto);
Assert.assertEquals(contextRecover.getContainerId(),containerId);
Assert.assertEquals(contextRecover.getCapability(),resource);
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Runs the distributed-shell client with a custom log4j properties file that
 * enables DEBUG, and verifies DEBUG output appears in the container logs and
 * that the client/AM loggers end up debug-enabled.
 */
@Test(timeout=90000) public void testDSShellWithCustomLogPropertyFile() throws Exception {
final File basedir=new File("target",TestDistributedShell.class.getName());
final File tmpDir=new File(basedir,"tmpDir");
tmpDir.mkdirs();
// Write a minimal log4j config that turns on debug logging.
final File customLogProperty=new File(tmpDir,"custom_log4j.properties");
if (customLogProperty.exists()) {
customLogProperty.delete();
}
if (!customLogProperty.createNewFile()) {
Assert.fail("Can not create custom log4j property file.");
}
PrintWriter fileWriter=new PrintWriter(customLogProperty);
fileWriter.write("log4j.rootLogger=debug,stdout");
fileWriter.close();
String[] args={"--jar",APPMASTER_JAR,"--num_containers","3","--shell_command","echo","--shell_args","HADOOP","--log_properties",customLogProperty.getAbsolutePath(),"--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1"};
// Before the run, neither logger should be at debug level.
final Log LOG_Client=LogFactory.getLog(Client.class);
Assert.assertTrue(LOG_Client.isInfoEnabled());
Assert.assertFalse(LOG_Client.isDebugEnabled());
final Log LOG_AM=LogFactory.getLog(ApplicationMaster.class);
Assert.assertTrue(LOG_AM.isInfoEnabled());
Assert.assertFalse(LOG_AM.isDebugEnabled());
LOG.info("Initializing DS Client");
final Client client=new Client(new Configuration(yarnCluster.getConfig()));
boolean initSuccess=client.init(args);
Assert.assertTrue(initSuccess);
LOG.info("Running DS Client");
// NOTE(review): the run result is logged but not asserted here; the real
// check is the DEBUG line count in the container logs below.
boolean result=client.run();
LOG.info("Client run completed. Result=" + result);
Assert.assertTrue(verifyContainerLog(3,null,true,"DEBUG") > 10);
// After the run, the custom properties must have switched both loggers to debug.
Assert.assertTrue(LOG_Client.isInfoEnabled());
Assert.assertTrue(LOG_Client.isDebugEnabled());
Assert.assertTrue(LOG_AM.isInfoEnabled());
Assert.assertTrue(LOG_AM.isDebugEnabled());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end distributed-shell run: launches the client on a background
 * thread, polls the RM until the AM host/port registration looks right, then
 * checks the timeline store recorded one app-attempt entity (with 2 events)
 * and two container entities.
 */
@Test(timeout=90000) public void testDSShell() throws Exception {
String[] args={"--jar",APPMASTER_JAR,"--num_containers","2","--shell_command",Shell.WINDOWS ? "dir" : "ls","--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1"};
LOG.info("Initializing DS Client");
final Client client=new Client(new Configuration(yarnCluster.getConfig()));
boolean initSuccess=client.init(args);
Assert.assertTrue(initSuccess);
LOG.info("Running DS Client");
// Run the client on a separate thread so this test can poll the RM while
// the application is in flight.
final AtomicBoolean result=new AtomicBoolean(false);
Thread t=new Thread(){
public void run(){
try {
result.set(client.run());
}
catch ( Exception e) {
throw new RuntimeException(e);
}
}
}
;
t.start();
YarnClient yarnClient=YarnClient.createYarnClient();
yarnClient.init(new Configuration(yarnCluster.getConfig()));
yarnClient.start();
String hostName=NetUtils.getHostname();
boolean verified=false;
String errorMessage="";
// Busy-poll until the app registers a real host, then validate host/port.
while (!verified) {
List apps=yarnClient.getApplications();
if (apps.size() == 0) {
Thread.sleep(10);
continue;
}
ApplicationReport appReport=apps.get(0);
// "N/A" means the AM has not registered its host yet; keep polling.
if (appReport.getHost().equals("N/A")) {
Thread.sleep(10);
continue;
}
errorMessage="Expected host name to start with '" + hostName + "', was '"+ appReport.getHost()+ "'. Expected rpc port to be '-1', was '"+ appReport.getRpcPort()+ "'.";
if (checkHostname(appReport.getHost()) && appReport.getRpcPort() == -1) {
verified=true;
}
// Stop polling once the app finishes, even if verification never passed.
if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED) {
break;
}
}
Assert.assertTrue(errorMessage,verified);
t.join();
LOG.info("Client run completed. Result=" + result);
Assert.assertTrue(result.get());
// The timeline store must hold one app-attempt entity with two events
// (registered + finished) and one entity per launched container.
TimelineEntities entitiesAttempts=yarnCluster.getApplicationHistoryServer().getTimelineStore().getEntities(ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString(),null,null,null,null,null,null,null,null);
Assert.assertNotNull(entitiesAttempts);
Assert.assertEquals(1,entitiesAttempts.getEntities().size());
Assert.assertEquals(2,entitiesAttempts.getEntities().get(0).getEvents().size());
Assert.assertEquals(entitiesAttempts.getEntities().get(0).getEntityType().toString(),ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString());
TimelineEntities entities=yarnCluster.getApplicationHistoryServer().getTimelineStore().getEntities(ApplicationMaster.DSEntity.DS_CONTAINER.toString(),null,null,null,null,null,null,null,null);
Assert.assertNotNull(entities);
Assert.assertEquals(2,entities.getEntities().size());
Assert.assertEquals(entities.getEntities().get(0).getEntityType().toString(),ApplicationMaster.DSEntity.DS_CONTAINER.toString());
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Runs the distributed-shell client with a user-provided shell script and
 * verifies the script's echoed output shows up in the container log.
 */
@Test(timeout=90000) public void testDSShellWithShellScript() throws Exception {
final File basedir=new File("target",TestDistributedShell.class.getName());
final File tmpDir=new File(basedir,"tmpDir");
tmpDir.mkdirs();
// Create a one-line script whose output we can grep for later.
final File customShellScript=new File(tmpDir,"custom_script.sh");
if (customShellScript.exists()) {
customShellScript.delete();
}
if (!customShellScript.createNewFile()) {
Assert.fail("Can not create custom shell script file.");
}
PrintWriter fileWriter=new PrintWriter(customShellScript);
fileWriter.write("echo testDSShellWithShellScript");
fileWriter.close();
System.out.println(customShellScript.getAbsolutePath());
String[] args={"--jar",APPMASTER_JAR,"--num_containers","1","--shell_script",customShellScript.getAbsolutePath(),"--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1"};
LOG.info("Initializing DS Client");
final Client client=new Client(new Configuration(yarnCluster.getConfig()));
boolean initSuccess=client.init(args);
Assert.assertTrue(initSuccess);
LOG.info("Running DS Client");
boolean result=client.run();
LOG.info("Client run completed. Result=" + result);
// The single container's log must contain the script's echoed line.
List expectedContent=new ArrayList();
expectedContent.add("testDSShellWithShellScript");
verifyContainerLog(1,expectedContent,false,"");
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Launches an unmanaged AM and verifies the attempt is in LAUNCHED state at
 * launch time and that the overall run succeeds. Skips (returns) when
 * JAVA_HOME is not set, since the AM command needs a JVM path.
 */
@Test(timeout=30000) public void testUMALauncher() throws Exception {
String classpath=getTestRuntimeClasspath();
String javaHome=System.getenv("JAVA_HOME");
if (javaHome == null) {
// Cannot build the AM launch command without a JVM path; skip quietly.
LOG.fatal("JAVA_HOME not defined. Test not running.");
return;
}
String[] args={"--classpath",classpath,"--queue","default","--cmd",javaHome + "/bin/java -Xmx512m " + TestUnmanagedAMLauncher.class.getCanonicalName()+ " success"};
LOG.info("Initializing Launcher");
// Override launchAM to assert the attempt state just before the real launch.
UnmanagedAMLauncher launcher=new UnmanagedAMLauncher(new Configuration(yarnCluster.getConfig())){
public void launchAM( ApplicationAttemptId attemptId) throws IOException, YarnException {
YarnApplicationAttemptState attemptState=rmClient.getApplicationAttemptReport(attemptId).getYarnApplicationAttemptState();
Assert.assertTrue(attemptState.equals(YarnApplicationAttemptState.LAUNCHED));
super.launchAM(attemptId);
}
}
;
boolean initSuccess=launcher.init(args);
Assert.assertTrue(initSuccess);
LOG.info("Running Launcher");
boolean result=launcher.run();
LOG.info("Launcher run completed. Result=" + result);
Assert.assertTrue(result);
}
APIUtilityVerifier EqualityVerifier
/**
 * Renewing a delegation token through the HA client RM proxy must return the
 * fake cluster's next expiration time.
 */
@Test(timeout=15000) public void testRenewDelegationTokenOnHA() throws Exception {
  RenewDelegationTokenRequest request=RenewDelegationTokenRequest.newInstance(cluster.createFakeToken());
  long newExpirationTime=ClientRMProxy.createRMProxy(this.conf,ApplicationClientProtocol.class).renewDelegationToken(request).getNextExpirationTime();
  // JUnit convention: expected value first, actual second.
  Assert.assertEquals(cluster.createNextExpirationTime(),newExpirationTime);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * ClientRMProxy.getRMDelegationTokenService: one service entry for a
 * non-HA configuration, and one entry per RM id when HA is enabled.
 */
@Test public void testGetRMDelegationTokenService(){
String defaultRMAddress=YarnConfiguration.DEFAULT_RM_ADDRESS;
YarnConfiguration conf=new YarnConfiguration();
// Non-HA: a single service derived from the default RM address.
Text tokenService=ClientRMProxy.getRMDelegationTokenService(conf);
String[] services=tokenService.toString().split(",");
assertEquals(1,services.length);
for ( String service : services) {
assertTrue("Incorrect token service name",service.contains(defaultRMAddress));
}
// HA with two RMs: the service list must carry one entry per RM.
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED,true);
conf.set(YarnConfiguration.RM_HA_IDS,"rm1,rm2");
conf.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME,"rm1"),"0.0.0.0");
conf.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME,"rm2"),"0.0.0.0");
tokenService=ClientRMProxy.getRMDelegationTokenService(conf);
services=tokenService.toString().split(",");
assertEquals(2,services.length);
for ( String service : services) {
assertTrue("Incorrect token service name",service.contains(defaultRMAddress));
}
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Requests to the standby RM's web app must carry a Refresh header pointing
 * at the active RM for proxied pages, and no Refresh header for pages each
 * RM serves locally.
 */
@Test public void testRMWebAppRedirect() throws YarnException, InterruptedException, IOException {
  cluster=new MiniYARNCluster(TestRMFailover.class.getName(),2,0,1,1);
  conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
  cluster.init(conf);
  cluster.start();
  getAdminService(0).transitionToActive(req);
  String rm1Url="http://0.0.0.0:18088";
  String rm2Url="http://0.0.0.0:28088";
  String header;
  // Pages owned by the active RM: the standby redirects to rm1.
  for (String path : new String[]{"","/metrics","/jmx"}) {
    header=getHeader("Refresh",rm2Url + path);
    assertTrue(header.contains("; url=" + rm1Url));
  }
  // Pages served locally by each RM: no Refresh header expected.
  for (String path : new String[]{"/cluster/cluster","/conf","/stacks","/logLevel","/static","/logs","/ws/v1/cluster/info"}) {
    header=getHeader("Refresh",rm2Url + path);
    assertEquals(null,header);
  }
  // The REST apps listing is active-only, so it redirects as well.
  header=getHeader("Refresh",rm2Url + "/ws/v1/cluster/apps");
  assertTrue(header.contains("; url=" + rm1Url));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Simple test Resource request.
 * Test hashCode, equals and compare.
 */
@Test public void testResourceRequest(){
  Resource resource=recordFactory.newRecordInstance(Resource.class);
  Priority priority=recordFactory.newRecordInstance(Priority.class);
  ResourceRequest original=ResourceRequest.newInstance(priority,"localhost",resource,2);
  ResourceRequest copy=ResourceRequest.newInstance(priority,"localhost",resource,2);
  // Identical requests: equal, compare to 0, and hash the same.
  assertTrue(original.equals(copy));
  assertEquals(0,original.compareTo(copy));
  assertEquals(original.hashCode(),copy.hashCode());
  // Change the container count: the two requests must now differ.
  copy.setNumContainers(1);
  assertFalse(original.equals(copy));
  // Was assertNotSame(0, original.compareTo(copy)): assertNotSame compares
  // the identity of autoboxed Integers, not numeric inequality, so it only
  // worked by accident via the small-Integer cache. Assert the value instead.
  assertTrue(original.compareTo(copy) != 0);
  // Relies on ResourceRequest's hashCode including numContainers.
  assertFalse(original.hashCode() == copy.hashCode());
}
APIUtilityVerifier IterativeVerifier EqualityVerifier
/**
 * Exercises AMRMClientAsync's heartbeat thread: while the second heartbeat
 * is parked inside the mocked allocate(), no further heartbeats may complete,
 * and once unblocked the allocated and completed containers must each be
 * delivered to the callback handler exactly once.
 */
@SuppressWarnings("unchecked") @Test(timeout=10000) public void testAMRMClientAsync() throws Exception {
  Configuration conf=new Configuration();
  final AtomicBoolean heartbeatBlock=new AtomicBoolean(true);
  List completed1=Arrays.asList(ContainerStatus.newInstance(newContainerId(0,0,0,0),ContainerState.COMPLETE,"",0));
  List allocated1=Arrays.asList(Container.newInstance(null,null,null,null,null,null));
  final AllocateResponse response1=createAllocateResponse(new ArrayList(),allocated1,null);
  final AllocateResponse response2=createAllocateResponse(completed1,new ArrayList(),null);
  final AllocateResponse emptyResponse=createAllocateResponse(new ArrayList(),new ArrayList(),null);
  TestCallbackHandler callbackHandler=new TestCallbackHandler();
  final AMRMClient client=mock(AMRMClientImpl.class);
  final AtomicInteger secondHeartbeatSync=new AtomicInteger(0);
  // First allocate returns the allocation; the second parks on heartbeatBlock
  // (bumping the sync counter on entry and on exit); later ones are empty.
  when(client.allocate(anyFloat())).thenReturn(response1).thenAnswer(new Answer(){
    @Override public AllocateResponse answer( InvocationOnMock invocation) throws Throwable {
      secondHeartbeatSync.incrementAndGet();
      while (heartbeatBlock.get()) {
        synchronized (heartbeatBlock) {
          heartbeatBlock.wait();
        }
      }
      secondHeartbeatSync.incrementAndGet();
      return response2;
    }
  }
  ).thenReturn(emptyResponse);
  when(client.registerApplicationMaster(anyString(),anyInt(),anyString())).thenReturn(null);
  when(client.getAvailableResources()).thenAnswer(new Answer(){
    @Override public Resource answer( InvocationOnMock invocation) throws Throwable {
      // Briefly hold the client lock to mimic contention with the heartbeater.
      synchronized (client) {
        Thread.sleep(10);
      }
      return null;
    }
  }
  );
  AMRMClientAsync asyncClient=AMRMClientAsync.createAMRMClientAsync(client,20,callbackHandler);
  asyncClient.init(conf);
  asyncClient.start();
  asyncClient.registerApplicationMaster("localhost",1234,null);
  // Wait until the heartbeat thread has entered the blocked second allocate.
  while (secondHeartbeatSync.get() < 1) {
    Thread.sleep(10);
  }
  // Use JUnit assertions rather than the bare `assert` keyword, which is
  // silently skipped unless the JVM runs with -ea.
  Assert.assertTrue(secondHeartbeatSync.get() < 2);
  asyncClient.getAvailableResources();
  // Still blocked: no additional heartbeat may have completed.
  Assert.assertTrue(secondHeartbeatSync.get() < 2);
  synchronized (heartbeatBlock) {
    heartbeatBlock.set(false);
    heartbeatBlock.notifyAll();
  }
  Assert.assertEquals(null,callbackHandler.takeCompletedContainers());
  // Allocated containers arrive first; completions only on the later beat.
  while (callbackHandler.takeAllocatedContainers() == null) {
    Assert.assertEquals(null,callbackHandler.takeCompletedContainers());
    Thread.sleep(10);
  }
  while (callbackHandler.takeCompletedContainers() == null) {
    Thread.sleep(10);
  }
  asyncClient.stop();
  // Each batch must have been consumed exactly once.
  Assert.assertEquals(null,callbackHandler.takeAllocatedContainers());
  Assert.assertEquals(null,callbackHandler.takeCompletedContainers());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * getApplicationAttemptReport via the mock AHS client must return the canned
 * report's current attempt id.
 */
@Test(timeout=10000) public void testGetApplicationAttempt() throws YarnException, IOException {
  Configuration conf=new Configuration();
  final AHSClient client=new MockAHSClient();
  client.init(conf);
  client.start();
  // Restored element type lost in the source ("List expectedReports" raw).
  List<ApplicationReport> expectedReports=((MockAHSClient)client).getReports();
  ApplicationId applicationId=ApplicationId.newInstance(1234,5);
  ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,1);
  ApplicationAttemptReport report=client.getApplicationAttemptReport(appAttemptId);
  Assert.assertNotNull(report);
  // JUnit convention: expected value first, actual second (was reversed).
  Assert.assertEquals(expectedReports.get(0).getCurrentApplicationAttemptId().toString(),report.getApplicationAttemptId().toString());
  client.stop();
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * getContainerReport via the mock AHS client must return a report whose
 * container id is container 1 of the canned report's current attempt.
 */
@Test(timeout=10000) public void testGetContainerReport() throws YarnException, IOException {
  Configuration conf=new Configuration();
  final AHSClient client=new MockAHSClient();
  client.init(conf);
  client.start();
  // Restored element type lost in the source ("List expectedReports" raw).
  List<ApplicationReport> expectedReports=((MockAHSClient)client).getReports();
  ApplicationId applicationId=ApplicationId.newInstance(1234,5);
  ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,1);
  ContainerId containerId=ContainerId.newInstance(appAttemptId,1);
  ContainerReport report=client.getContainerReport(containerId);
  Assert.assertNotNull(report);
  // JUnit convention: expected value first, actual second (was reversed).
  Assert.assertEquals((ContainerId.newInstance(expectedReports.get(0).getCurrentApplicationAttemptId(),1)).toString(),report.getContainerId().toString());
  client.stop();
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * getContainers via the mock AHS client must return two reports for
 * containers 1 and 2 of the requested attempt, in order.
 */
@Test(timeout=10000) public void testGetContainers() throws YarnException, IOException {
  Configuration conf=new Configuration();
  final AHSClient client=new MockAHSClient();
  client.init(conf);
  client.start();
  ApplicationId applicationId=ApplicationId.newInstance(1234,5);
  ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,1);
  // Restored element type lost in the source ("List reports" raw).
  List<ContainerReport> reports=client.getContainers(appAttemptId);
  Assert.assertNotNull(reports);
  // JUnit convention: expected value first, actual second (was reversed).
  Assert.assertEquals(ContainerId.newInstance(appAttemptId,1),reports.get(0).getContainerId());
  Assert.assertEquals(ContainerId.newInstance(appAttemptId,2),reports.get(1).getContainerId());
  client.stop();
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=60000) public void testAMRMClientMatchingFit() throws YarnException, IOException {
AMRMClient amClient=null;
try {
amClient=AMRMClient.createAMRMClient();
amClient.init(conf);
amClient.start();
amClient.registerApplicationMaster("Host",10000,"");
Resource capability1=Resource.newInstance(1024,2);
Resource capability2=Resource.newInstance(1024,1);
Resource capability3=Resource.newInstance(1000,2);
Resource capability4=Resource.newInstance(2000,1);
Resource capability5=Resource.newInstance(1000,3);
Resource capability6=Resource.newInstance(2000,1);
Resource capability7=Resource.newInstance(2000,1);
ContainerRequest storedContainer1=new ContainerRequest(capability1,nodes,racks,priority);
ContainerRequest storedContainer2=new ContainerRequest(capability2,nodes,racks,priority);
ContainerRequest storedContainer3=new ContainerRequest(capability3,nodes,racks,priority);
ContainerRequest storedContainer4=new ContainerRequest(capability4,nodes,racks,priority);
ContainerRequest storedContainer5=new ContainerRequest(capability5,nodes,racks,priority);
ContainerRequest storedContainer6=new ContainerRequest(capability6,nodes,racks,priority);
ContainerRequest storedContainer7=new ContainerRequest(capability7,nodes,racks,priority2,false);
amClient.addContainerRequest(storedContainer1);
amClient.addContainerRequest(storedContainer2);
amClient.addContainerRequest(storedContainer3);
amClient.addContainerRequest(storedContainer4);
amClient.addContainerRequest(storedContainer5);
amClient.addContainerRequest(storedContainer6);
amClient.addContainerRequest(storedContainer7);
List extends Collection> matches;
ContainerRequest storedRequest;
Resource testCapability1=Resource.newInstance(1024,2);
matches=amClient.getMatchingRequests(priority,node,testCapability1);
verifyMatches(matches,1);
storedRequest=matches.get(0).iterator().next();
assertEquals(storedContainer1,storedRequest);
amClient.removeContainerRequest(storedContainer1);
Resource testCapability2=Resource.newInstance(2000,1);
matches=amClient.getMatchingRequests(priority,node,testCapability2);
verifyMatches(matches,2);
int i=0;
for ( ContainerRequest storedRequest1 : matches.get(0)) {
if (i++ == 0) {
assertEquals(storedContainer4,storedRequest1);
}
else {
assertEquals(storedContainer6,storedRequest1);
}
}
amClient.removeContainerRequest(storedContainer6);
Resource testCapability3=Resource.newInstance(4000,4);
matches=amClient.getMatchingRequests(priority,node,testCapability3);
assert (matches.size() == 4);
Resource testCapability4=Resource.newInstance(1024,2);
matches=amClient.getMatchingRequests(priority,node,testCapability4);
assert (matches.size() == 2);
for ( Collection testSet : matches) {
assertEquals(1,testSet.size());
ContainerRequest testRequest=testSet.iterator().next();
assertTrue(testRequest != storedContainer4);
assertTrue(testRequest != storedContainer5);
assert (testRequest == storedContainer2 || testRequest == storedContainer3);
}
Resource testCapability5=Resource.newInstance(512,4);
matches=amClient.getMatchingRequests(priority,node,testCapability5);
assert (matches.size() == 0);
Resource testCapability7=Resource.newInstance(2000,1);
matches=amClient.getMatchingRequests(priority2,ResourceRequest.ANY,testCapability7);
assert (matches.size() == 0);
matches=amClient.getMatchingRequests(priority2,node,testCapability7);
assert (matches.size() == 1);
amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null);
}
finally {
if (amClient != null && amClient.getServiceState() == STATE.STARTED) {
amClient.stop();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Blacklisting during allocation: while a node is blacklisted no containers
 * are allocated; after removing it from the blacklist allocation succeeds;
 * and when allocate() fails, the pending blacklist additions remain queued
 * on the client.
 */
@Test(timeout=60000) public void testAllocationWithBlacklist() throws YarnException, IOException {
AMRMClientImpl amClient=null;
try {
amClient=(AMRMClientImpl)AMRMClient.createAMRMClient();
amClient.init(conf);
amClient.start();
amClient.registerApplicationMaster("Host",10000,"");
// Nothing requested or released yet.
assertEquals(0,amClient.ask.size());
assertEquals(0,amClient.release.size());
ContainerRequest storedContainer1=new ContainerRequest(capability,nodes,racks,priority);
amClient.addContainerRequest(storedContainer1);
// One request expands to 3 ask entries — presumably node + rack + ANY;
// confirm against AMRMClientImpl.addContainerRequest.
assertEquals(3,amClient.ask.size());
assertEquals(0,amClient.release.size());
List localNodeBlacklist=new ArrayList();
localNodeBlacklist.add(node);
// Blacklist the target node: the pending request must not be satisfied.
amClient.updateBlacklist(localNodeBlacklist,null);
int allocatedContainerCount=getAllocatedContainersNumber(amClient,DEFAULT_ITERATION);
assertEquals(0,allocatedContainerCount);
// Lift the blacklist and add a second request; both requests now allocate.
amClient.updateBlacklist(null,localNodeBlacklist);
ContainerRequest storedContainer2=new ContainerRequest(capability,nodes,racks,priority);
amClient.addContainerRequest(storedContainer2);
allocatedContainerCount=getAllocatedContainersNumber(amClient,DEFAULT_ITERATION);
assertEquals(2,allocatedContainerCount);
// The earlier updates have been flushed to the RM by now.
assertTrue(amClient.blacklistAdditions.isEmpty());
assertTrue(amClient.blacklistRemovals.isEmpty());
// Negative memory makes allocate() fail; the blacklist addition queued just
// before must survive the failed call.
ContainerRequest invalidContainerRequest=new ContainerRequest(Resource.newInstance(-1024,1),nodes,racks,priority);
amClient.addContainerRequest(invalidContainerRequest);
amClient.updateBlacklist(localNodeBlacklist,null);
try {
amClient.allocate(0.1f);
fail("there should be an exception here.");
}
catch ( Exception e) {
assertEquals(1,amClient.blacklistAdditions.size());
}
}
finally {
if (amClient != null && amClient.getServiceState() == STATE.STARTED) {
amClient.stop();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=60000) public void testAMRMClientMatchingFitInferredRack() throws YarnException, IOException {
AMRMClientImpl amClient=null;
try {
amClient=new AMRMClientImpl();
amClient.init(conf);
amClient.start();
amClient.registerApplicationMaster("Host",10000,"");
Resource capability=Resource.newInstance(1024,2);
ContainerRequest storedContainer1=new ContainerRequest(capability,nodes,null,priority);
amClient.addContainerRequest(storedContainer1);
List extends Collection> matches;
ContainerRequest storedRequest;
matches=amClient.getMatchingRequests(priority,node,capability);
verifyMatches(matches,1);
storedRequest=matches.get(0).iterator().next();
assertEquals(storedContainer1,storedRequest);
matches=amClient.getMatchingRequests(priority,rack,capability);
verifyMatches(matches,1);
storedRequest=matches.get(0).iterator().next();
assertEquals(storedContainer1,storedRequest);
amClient.removeContainerRequest(storedContainer1);
matches=amClient.getMatchingRequests(priority,rack,capability);
assertTrue(matches.isEmpty());
amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null);
}
finally {
if (amClient != null && amClient.getServiceState() == STATE.STARTED) {
amClient.stop();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * End-to-end bookkeeping of the AMRM client's request store
 * (remoteRequestsTable): adding requests aggregates container counts per
 * priority/resource, removals shrink the matching sets, and allocated
 * containers can be matched back to the stored request that produced them.
 */
@Test public void testAMRMClientMatchStorage() throws YarnException, IOException {
AMRMClientImpl amClient=null;
try {
amClient=(AMRMClientImpl)AMRMClient.createAMRMClient();
amClient.init(conf);
amClient.start();
amClient.registerApplicationMaster("Host",10000,"");
Priority priority1=Records.newRecord(Priority.class);
priority1.setPriority(2);
// Two identical requests at `priority`, one node-less request at priority1.
ContainerRequest storedContainer1=new ContainerRequest(capability,nodes,racks,priority);
ContainerRequest storedContainer2=new ContainerRequest(capability,nodes,racks,priority);
ContainerRequest storedContainer3=new ContainerRequest(capability,null,null,priority1);
amClient.addContainerRequest(storedContainer1);
amClient.addContainerRequest(storedContainer2);
amClient.addContainerRequest(storedContainer3);
// The ANY entry aggregates the container count per (priority, capability).
int containersRequestedAny=amClient.remoteRequestsTable.get(priority).get(ResourceRequest.ANY).get(capability).remoteRequest.getNumContainers();
assertEquals(2,containersRequestedAny);
containersRequestedAny=amClient.remoteRequestsTable.get(priority1).get(ResourceRequest.ANY).get(capability).remoteRequest.getNumContainers();
assertEquals(1,containersRequestedAny);
// NOTE(review): next line's generic declaration is garbled in this source —
// it should read List<? extends Collection<ContainerRequest>>.
List extends Collection> matches=amClient.getMatchingRequests(priority,node,capability);
verifyMatches(matches,2);
matches=amClient.getMatchingRequests(priority,rack,capability);
verifyMatches(matches,2);
matches=amClient.getMatchingRequests(priority,ResourceRequest.ANY,capability);
verifyMatches(matches,2);
// storedContainer3 was created without nodes/racks: no rack match.
matches=amClient.getMatchingRequests(priority1,rack,capability);
assertTrue(matches.isEmpty());
matches=amClient.getMatchingRequests(priority1,ResourceRequest.ANY,capability);
verifyMatches(matches,1);
// Removing requests shrinks only the matching sets they belonged to.
amClient.removeContainerRequest(storedContainer3);
matches=amClient.getMatchingRequests(priority,node,capability);
verifyMatches(matches,2);
amClient.removeContainerRequest(storedContainer2);
matches=amClient.getMatchingRequests(priority,node,capability);
verifyMatches(matches,1);
matches=amClient.getMatchingRequests(priority,rack,capability);
verifyMatches(matches,1);
ContainerRequest storedRequest=matches.get(0).iterator().next();
assertEquals(storedContainer1,storedRequest);
amClient.removeContainerRequest(storedContainer1);
// All requests removed: the table must be completely empty.
matches=amClient.getMatchingRequests(priority,ResourceRequest.ANY,capability);
assertTrue(matches.isEmpty());
matches=amClient.getMatchingRequests(priority1,ResourceRequest.ANY,capability);
assertTrue(matches.isEmpty());
assertTrue(amClient.remoteRequestsTable.isEmpty());
// Re-add two requests and drive allocate() until both are satisfied.
amClient.addContainerRequest(storedContainer1);
amClient.addContainerRequest(storedContainer3);
int allocatedContainerCount=0;
int iterationsLeft=3;
while (allocatedContainerCount < 2 && iterationsLeft-- > 0) {
Log.info(" == alloc " + allocatedContainerCount + " it left "+ iterationsLeft);
AllocateResponse allocResponse=amClient.allocate(0.1f);
// allocate() flushes the ask/release queues on every heartbeat.
assertEquals(0,amClient.ask.size());
assertEquals(0,amClient.release.size());
assertEquals(nodeCount,amClient.getClusterNodeCount());
allocatedContainerCount+=allocResponse.getAllocatedContainers().size();
for ( Container container : allocResponse.getAllocatedContainers()) {
// Map each allocated container back to the request it satisfies by
// priority, then remove that request and release the container.
ContainerRequest expectedRequest=container.getPriority().equals(storedContainer1.getPriority()) ? storedContainer1 : storedContainer3;
matches=amClient.getMatchingRequests(container.getPriority(),ResourceRequest.ANY,container.getResource());
verifyMatches(matches,1);
ContainerRequest matchedRequest=matches.get(0).iterator().next();
assertEquals(matchedRequest,expectedRequest);
amClient.removeContainerRequest(matchedRequest);
amClient.releaseAssignedContainer(container.getId());
}
if (allocatedContainerCount < containersRequestedAny) {
sleep(100);
}
}
assertEquals(2,allocatedContainerCount);
// A final heartbeat: nothing pending, nothing newly allocated.
AllocateResponse allocResponse=amClient.allocate(0.1f);
assertEquals(0,amClient.release.size());
assertEquals(0,amClient.ask.size());
assertEquals(0,allocResponse.getAllocatedContainers().size());
assertTrue(amClient.remoteRequestsTable.isEmpty());
amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null);
}
finally {
if (amClient != null && amClient.getServiceState() == STATE.STARTED) {
amClient.stop();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * With the timeline service setting left unset (disabled by default),
 * putEntities must return an empty response without surfacing the mocked
 * server-side failure.
 */
@Test public void testPostEntitiesTimelineServiceDefaultNotEnabled() throws Exception {
  YarnConfiguration timelineConf=new YarnConfiguration();
  timelineConf.unset(YarnConfiguration.TIMELINE_SERVICE_ENABLED);
  TimelineClientImpl timelineClient=createTimelineClient(timelineConf);
  // Mock a server error; it must never reach the caller when disabled.
  mockClientResponse(timelineClient,ClientResponse.Status.INTERNAL_SERVER_ERROR,false,false);
  try {
    TimelinePutResponse putResponse=timelineClient.putEntities(generateEntity());
    Assert.assertTrue(putResponse.getErrors().isEmpty());
  }
  catch ( YarnException e) {
    Assert.fail("putEntities should already return before throwing the exception");
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * With the timeline service explicitly disabled, putEntities must return an
 * empty response without surfacing the mocked server-side failure.
 */
@Test public void testPostEntitiesTimelineServiceNotEnabled() throws Exception {
  YarnConfiguration timelineConf=new YarnConfiguration();
  timelineConf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED,false);
  TimelineClientImpl timelineClient=createTimelineClient(timelineConf);
  // Mock a server error; it must never reach the caller when disabled.
  mockClientResponse(timelineClient,ClientResponse.Status.INTERNAL_SERVER_ERROR,false,false);
  try {
    TimelinePutResponse putResponse=timelineClient.putEntities(generateEntity());
    Assert.assertTrue(putResponse.getErrors().isEmpty());
  }
  catch ( YarnException e) {
    Assert.fail("putEntities should already return before throwing the exception");
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * getContainers via the mock YARN client must return two reports for
 * containers 1 and 2 of the requested attempt, in order.
 */
@Test(timeout=10000) public void testGetContainers() throws YarnException, IOException {
  Configuration conf=new Configuration();
  final YarnClient client=new MockYarnClient();
  client.init(conf);
  client.start();
  ApplicationId applicationId=ApplicationId.newInstance(1234,5);
  ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,1);
  // Restored element type lost in the source ("List reports" raw).
  List<ContainerReport> reports=client.getContainers(appAttemptId);
  Assert.assertNotNull(reports);
  // JUnit convention: expected value first, actual second (was reversed).
  Assert.assertEquals(ContainerId.newInstance(appAttemptId,1),reports.get(0).getContainerId());
  Assert.assertEquals(ContainerId.newInstance(appAttemptId,2),reports.get(1).getContainerId());
  client.stop();
}
APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier
/**
 * AMRM token visibility: an app submitted without the token flag gets no
 * AMRM token; one submitted with it does (after polling); and a token issued
 * for a different user's application is not visible to this client.
 */
@Test(timeout=30000) public void testAMMRTokens() throws Exception {
MiniYARNCluster cluster=new MiniYARNCluster("testMRAMTokens",1,1,1);
YarnClient rmClient=null;
try {
cluster.init(new YarnConfiguration());
cluster.start();
final Configuration yarnConf=cluster.getConfig();
rmClient=YarnClient.createYarnClient();
rmClient.init(yarnConf);
rmClient.start();
// App created with the flag false: no AMRM token expected.
ApplicationId appId=createApp(rmClient,false);
waitTillAccepted(rmClient,appId);
Assert.assertNull(rmClient.getAMRMToken(appId));
// App created with the flag true: poll (up to 20s) until the token shows up.
appId=createApp(rmClient,true);
waitTillAccepted(rmClient,appId);
long start=System.currentTimeMillis();
while (rmClient.getAMRMToken(appId) == null) {
if (System.currentTimeMillis() - start > 20 * 1000) {
Assert.fail("AMRM token is null");
}
Thread.sleep(100);
}
Assert.assertNotNull(rmClient.getAMRMToken(appId));
// Submit a third app as user "foo" through its own client; that client
// sees the token, but the outer client (different user) must not.
UserGroupInformation other=UserGroupInformation.createUserForTesting("foo",new String[]{});
appId=other.doAs(new PrivilegedExceptionAction(){
@Override public ApplicationId run() throws Exception {
YarnClient rmClient=YarnClient.createYarnClient();
rmClient.init(yarnConf);
rmClient.start();
ApplicationId appId=createApp(rmClient,true);
waitTillAccepted(rmClient,appId);
long start=System.currentTimeMillis();
while (rmClient.getAMRMToken(appId) == null) {
if (System.currentTimeMillis() - start > 20 * 1000) {
Assert.fail("AMRM token is null");
}
Thread.sleep(100);
}
Assert.assertNotNull(rmClient.getAMRMToken(appId));
return appId;
}
}
);
// Outer client queries foo's app id: token must be hidden from it.
Assert.assertNull(rmClient.getAMRMToken(appId));
}
finally {
if (rmClient != null) {
rmClient.stop();
}
cluster.stop();
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * getApplicationAttemptReport via the mock YARN client must return the
 * canned report's current attempt id.
 */
@Test(timeout=10000) public void testGetApplicationAttempt() throws YarnException, IOException {
  Configuration conf=new Configuration();
  final YarnClient client=new MockYarnClient();
  client.init(conf);
  client.start();
  // Restored element type lost in the source ("List expectedReports" raw).
  List<ApplicationReport> expectedReports=((MockYarnClient)client).getReports();
  ApplicationId applicationId=ApplicationId.newInstance(1234,5);
  ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,1);
  ApplicationAttemptReport report=client.getApplicationAttemptReport(appAttemptId);
  Assert.assertNotNull(report);
  // JUnit convention: expected value first, actual second (was reversed).
  Assert.assertEquals(expectedReports.get(0).getCurrentApplicationAttemptId().toString(),report.getApplicationAttemptId().toString());
  client.stop();
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * getContainerReport via the mock YARN client must return a report whose
 * container id is container 1 of the canned report's current attempt.
 */
@Test(timeout=10000) public void testGetContainerReport() throws YarnException, IOException {
  Configuration conf=new Configuration();
  final YarnClient client=new MockYarnClient();
  client.init(conf);
  client.start();
  // Restored element type lost in the source ("List expectedReports" raw).
  List<ApplicationReport> expectedReports=((MockYarnClient)client).getReports();
  ApplicationId applicationId=ApplicationId.newInstance(1234,5);
  ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,1);
  ContainerId containerId=ContainerId.newInstance(appAttemptId,1);
  ContainerReport report=client.getContainerReport(containerId);
  Assert.assertNotNull(report);
  // JUnit convention: expected value first, actual second (was reversed).
  Assert.assertEquals((ContainerId.newInstance(expectedReports.get(0).getCurrentApplicationAttemptId(),1)).toString(),report.getContainerId().toString());
  client.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Running the logs CLI with no arguments must fail with exit code -1 and
 * print the usage text verbatim.
 */
@Test(timeout=5000l) public void testHelpMessage() throws Exception {
  Configuration conf=new YarnConfiguration();
  YarnClient mockYarnClient=createMockYarnClient(YarnApplicationState.FINISHED);
  LogsCLI dumper=new LogsCLIForTest(mockYarnClient);
  dumper.setConf(conf);
  int exitCode=dumper.run(new String[]{});
  // assertEquals reports the actual code on failure, unlike assertTrue(==).
  Assert.assertEquals(-1,exitCode);
  // Build the expected help text exactly as the CLI prints it.
  ByteArrayOutputStream baos=new ByteArrayOutputStream();
  PrintWriter pw=new PrintWriter(baos);
  pw.println("Retrieve logs for completed YARN applications.");
  pw.println("usage: yarn logs -applicationId [OPTIONS]");
  pw.println();
  pw.println("general options are:");
  pw.println(" -appOwner AppOwner (assumed to be current user if");
  pw.println(" not specified)");
  pw.println(" -containerId ContainerId (must be specified if node");
  pw.println(" address is specified)");
  pw.println(" -nodeAddress NodeAddress in the format nodename:port");
  pw.println(" (must be specified if container id is");
  pw.println(" specified)");
  pw.close();
  String appReportStr=baos.toString("UTF-8");
  Assert.assertEquals(appReportStr,sysOutStream.toString());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * "applicationattempt -list" must query the client for the app's attempts
 * and print the two mocked attempts in the expected tabular format.
 */
@Test public void testGetApplicationAttempts() throws Exception {
ApplicationCLI cli=createAndGetAppCLI();
ApplicationId applicationId=ApplicationId.newInstance(1234,5);
ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(applicationId,1);
ApplicationAttemptId attemptId1=ApplicationAttemptId.newInstance(applicationId,2);
// Two finished attempts with identical metadata except the attempt number.
ApplicationAttemptReport attemptReport=ApplicationAttemptReport.newInstance(attemptId,"host",124,"url","diagnostics",YarnApplicationAttemptState.FINISHED,ContainerId.newInstance(attemptId,1));
ApplicationAttemptReport attemptReport1=ApplicationAttemptReport.newInstance(attemptId1,"host",124,"url","diagnostics",YarnApplicationAttemptState.FINISHED,ContainerId.newInstance(attemptId1,1));
List reports=new ArrayList();
reports.add(attemptReport);
reports.add(attemptReport1);
when(client.getApplicationAttempts(any(ApplicationId.class))).thenReturn(reports);
int result=cli.run(new String[]{"applicationattempt","-list",applicationId.toString()});
assertEquals(0,result);
verify(client).getApplicationAttempts(applicationId);
// Reconstruct the expected table byte-for-byte and compare with stdout.
ByteArrayOutputStream baos=new ByteArrayOutputStream();
PrintWriter pw=new PrintWriter(baos);
pw.println("Total number of application attempts :2");
pw.print(" ApplicationAttempt-Id");
pw.print("\t State");
pw.print("\t AM-Container-Id");
pw.println("\t Tracking-URL");
pw.print(" appattempt_1234_0005_000001");
pw.print("\t FINISHED");
pw.print("\t container_1234_0005_01_000001");
pw.println("\t url");
pw.print(" appattempt_1234_0005_000002");
pw.print("\t FINISHED");
pw.print("\t container_1234_0005_02_000001");
pw.println("\t url");
pw.close();
String appReportStr=baos.toString("UTF-8");
Assert.assertEquals(appReportStr,sysOutStream.toString());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * "container -list" must query the client for the attempt's containers and
 * print the two mocked container reports in the expected tabular format.
 */
@Test public void testGetContainers() throws Exception {
ApplicationCLI cli=createAndGetAppCLI();
ApplicationId applicationId=ApplicationId.newInstance(1234,5);
ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(applicationId,1);
ContainerId containerId=ContainerId.newInstance(attemptId,1);
ContainerId containerId1=ContainerId.newInstance(attemptId,2);
// Two completed containers differing only in the container number.
ContainerReport container=ContainerReport.newInstance(containerId,null,NodeId.newInstance("host",1234),Priority.UNDEFINED,1234,5678,"diagnosticInfo","logURL",0,ContainerState.COMPLETE);
ContainerReport container1=ContainerReport.newInstance(containerId1,null,NodeId.newInstance("host",1234),Priority.UNDEFINED,1234,5678,"diagnosticInfo","logURL",0,ContainerState.COMPLETE);
List reports=new ArrayList();
reports.add(container);
reports.add(container1);
when(client.getContainers(any(ApplicationAttemptId.class))).thenReturn(reports);
int result=cli.run(new String[]{"container","-list",attemptId.toString()});
assertEquals(0,result);
verify(client).getContainers(attemptId);
Log.info(sysOutStream.toString());
// Reconstruct the expected table byte-for-byte and compare with stdout.
ByteArrayOutputStream baos=new ByteArrayOutputStream();
PrintWriter pw=new PrintWriter(baos);
pw.println("Total number of containers :2");
pw.print(" Container-Id");
pw.print("\t Start Time");
pw.print("\t Finish Time");
pw.print("\t State");
pw.print("\t Host");
pw.println("\t LOG-URL");
pw.print(" container_1234_0005_01_000001");
pw.print("\t 1234");
pw.print("\t 5678");
pw.print("\t COMPLETE");
pw.print("\t host:1234");
pw.println("\t logURL");
pw.print(" container_1234_0005_01_000002");
pw.print("\t 1234");
pw.print("\t 5678");
pw.print("\t COMPLETE");
pw.print("\t host:1234");
pw.println("\t logURL");
pw.close();
String appReportStr=baos.toString("UTF-8");
Assert.assertEquals(appReportStr,sysOutStream.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * "application -help" and malformed application commands must all print the
 * application CLI usage text via printUsage().
 */
@Test(timeout=10000) public void testAppsHelpCommand() throws Exception {
  ApplicationCLI cli=createAndGetAppCLI();
  ApplicationCLI spyCli=spy(cli);
  int result=spyCli.run(new String[]{"application","-help"});
  // assertEquals reports the actual code on failure, unlike assertTrue(==).
  Assert.assertEquals(0,result);
  verify(spyCli).printUsage(any(String.class),any(Options.class));
  Assert.assertEquals(createApplicationCLIHelpMessage(),sysOutStream.toString());
  sysOutStream.reset();
  ApplicationId applicationId=ApplicationId.newInstance(1234,5);
  // NOTE(review): the return codes of the two runs below are assigned but
  // never asserted; only the printed usage text is checked — confirm that
  // this is intentional.
  result=cli.run(new String[]{"application","-kill",applicationId.toString(),"args"});
  verify(spyCli).printUsage(any(String.class),any(Options.class));
  Assert.assertEquals(createApplicationCLIHelpMessage(),sysOutStream.toString());
  sysOutStream.reset();
  NodeId nodeId=NodeId.newInstance("host0",0);
  result=cli.run(new String[]{"application","-status",nodeId.toString(),"args"});
  verify(spyCli).printUsage(any(String.class),any(Options.class));
  Assert.assertEquals(createApplicationCLIHelpMessage(),sysOutStream.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * "container -help" and malformed container commands must all print the
 * container CLI usage text via printUsage().
 */
@Test(timeout=10000) public void testContainersHelpCommand() throws Exception {
  ApplicationCLI cli=createAndGetAppCLI();
  ApplicationCLI spyCli=spy(cli);
  int result=spyCli.run(new String[]{"container","-help"});
  // assertEquals reports the actual code on failure, unlike assertTrue(==).
  Assert.assertEquals(0,result);
  verify(spyCli).printUsage(any(String.class),any(Options.class));
  Assert.assertEquals(createContainerCLIHelpMessage(),sysOutStream.toString());
  sysOutStream.reset();
  ApplicationId applicationId=ApplicationId.newInstance(1234,5);
  ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,6);
  // NOTE(review): the return codes of the two runs below are assigned but
  // never asserted; only the printed usage text is checked — confirm that
  // this is intentional.
  result=cli.run(new String[]{"container","-list",appAttemptId.toString(),"args"});
  verify(spyCli).printUsage(any(String.class),any(Options.class));
  Assert.assertEquals(createContainerCLIHelpMessage(),sysOutStream.toString());
  sysOutStream.reset();
  ContainerId containerId=ContainerId.newInstance(appAttemptId,7);
  result=cli.run(new String[]{"container","-status",containerId.toString(),"args"});
  verify(spyCli).printUsage(any(String.class),any(Options.class));
  Assert.assertEquals(createContainerCLIHelpMessage(),sysOutStream.toString());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Exercises "node -list" with every supported --states filter (plus --all and
// the no-filter default) and compares the CLI's stdout against a hand-built
// expected report. The verify(sysOut, times(n)) counters are cumulative across
// scenarios, so the statement order below must not change.
@Test public void testListClusterNodes() throws Exception {
// Fixture: one node per state, except two RUNNING nodes.
List nodeReports=new ArrayList();
nodeReports.addAll(getNodeReports(1,NodeState.NEW));
nodeReports.addAll(getNodeReports(2,NodeState.RUNNING));
nodeReports.addAll(getNodeReports(1,NodeState.UNHEALTHY));
nodeReports.addAll(getNodeReports(1,NodeState.DECOMMISSIONED));
nodeReports.addAll(getNodeReports(1,NodeState.REBOOTED));
nodeReports.addAll(getNodeReports(1,NodeState.LOST));
NodeCLI cli=new NodeCLI();
cli.setClient(client);
cli.setSysOutPrintStream(sysOut);
// Scenario 1: --states NEW.
Set nodeStates=new HashSet();
nodeStates.add(NodeState.NEW);
NodeState[] states=nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
int result=cli.run(new String[]{"-list","--states","NEW"});
assertEquals(0,result);
verify(client).getNodeReports(states);
ByteArrayOutputStream baos=new ByteArrayOutputStream();
PrintWriter pw=new PrintWriter(baos);
pw.println("Total Nodes:1");
pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
pw.println("Number-of-Running-Containers");
pw.print("         host0:0\t            NEW\t       host1:8888\t");
pw.println("                           0");
pw.close();
String nodesReportStr=baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr,sysOutStream.toString());
verify(sysOut,times(1)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 2: --states RUNNING (both running nodes listed).
nodeStates.clear();
nodeStates.add(NodeState.RUNNING);
states=nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
result=cli.run(new String[]{"-list","--states","RUNNING"});
assertEquals(0,result);
verify(client).getNodeReports(states);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total Nodes:2");
pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
pw.println("Number-of-Running-Containers");
pw.print("         host0:0\t        RUNNING\t       host1:8888\t");
pw.println("                           0");
pw.print("         host1:0\t        RUNNING\t       host1:8888\t");
pw.println("                           0");
pw.close();
nodesReportStr=baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr,sysOutStream.toString());
verify(sysOut,times(2)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 3: plain -list defaults to RUNNING, so the same report is expected.
result=cli.run(new String[]{"-list"});
assertEquals(0,result);
Assert.assertEquals(nodesReportStr,sysOutStream.toString());
verify(sysOut,times(3)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 4: --states UNHEALTHY.
nodeStates.clear();
nodeStates.add(NodeState.UNHEALTHY);
states=nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
result=cli.run(new String[]{"-list","--states","UNHEALTHY"});
assertEquals(0,result);
verify(client).getNodeReports(states);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total Nodes:1");
pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
pw.println("Number-of-Running-Containers");
pw.print("         host0:0\t      UNHEALTHY\t       host1:8888\t");
pw.println("                           0");
pw.close();
nodesReportStr=baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr,sysOutStream.toString());
verify(sysOut,times(4)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 5: --states DECOMMISSIONED.
nodeStates.clear();
nodeStates.add(NodeState.DECOMMISSIONED);
states=nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
result=cli.run(new String[]{"-list","--states","DECOMMISSIONED"});
assertEquals(0,result);
verify(client).getNodeReports(states);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total Nodes:1");
pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
pw.println("Number-of-Running-Containers");
pw.print("         host0:0\t DECOMMISSIONED\t       host1:8888\t");
pw.println("                           0");
pw.close();
nodesReportStr=baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr,sysOutStream.toString());
verify(sysOut,times(5)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 6: --states REBOOTED.
nodeStates.clear();
nodeStates.add(NodeState.REBOOTED);
states=nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
result=cli.run(new String[]{"-list","--states","REBOOTED"});
assertEquals(0,result);
verify(client).getNodeReports(states);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total Nodes:1");
pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
pw.println("Number-of-Running-Containers");
pw.print("         host0:0\t       REBOOTED\t       host1:8888\t");
pw.println("                           0");
pw.close();
nodesReportStr=baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr,sysOutStream.toString());
verify(sysOut,times(6)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 7: --states LOST.
nodeStates.clear();
nodeStates.add(NodeState.LOST);
states=nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
result=cli.run(new String[]{"-list","--states","LOST"});
assertEquals(0,result);
verify(client).getNodeReports(states);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total Nodes:1");
pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
pw.println("Number-of-Running-Containers");
pw.print("         host0:0\t           LOST\t       host1:8888\t");
pw.println("                           0");
pw.close();
nodesReportStr=baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr,sysOutStream.toString());
verify(sysOut,times(7)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 8: comma-separated multi-state filter.
nodeStates.clear();
nodeStates.add(NodeState.NEW);
nodeStates.add(NodeState.RUNNING);
nodeStates.add(NodeState.LOST);
nodeStates.add(NodeState.REBOOTED);
states=nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
result=cli.run(new String[]{"-list","--states","NEW,RUNNING,LOST,REBOOTED"});
assertEquals(0,result);
verify(client).getNodeReports(states);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total Nodes:5");
pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
pw.println("Number-of-Running-Containers");
pw.print("         host0:0\t            NEW\t       host1:8888\t");
pw.println("                           0");
pw.print("         host0:0\t        RUNNING\t       host1:8888\t");
pw.println("                           0");
pw.print("         host1:0\t        RUNNING\t       host1:8888\t");
pw.println("                           0");
pw.print("         host0:0\t       REBOOTED\t       host1:8888\t");
pw.println("                           0");
pw.print("         host0:0\t           LOST\t       host1:8888\t");
pw.println("                           0");
pw.close();
nodesReportStr=baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr,sysOutStream.toString());
verify(sysOut,times(8)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 9: --all lists every node regardless of state.
nodeStates.clear();
for ( NodeState s : NodeState.values()) {
nodeStates.add(s);
}
states=nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
result=cli.run(new String[]{"-list","--all"});
assertEquals(0,result);
verify(client).getNodeReports(states);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total Nodes:7");
pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
pw.println("Number-of-Running-Containers");
pw.print("         host0:0\t            NEW\t       host1:8888\t");
pw.println("                           0");
pw.print("         host0:0\t        RUNNING\t       host1:8888\t");
pw.println("                           0");
pw.print("         host1:0\t        RUNNING\t       host1:8888\t");
pw.println("                           0");
pw.print("         host0:0\t      UNHEALTHY\t       host1:8888\t");
pw.println("                           0");
pw.print("         host0:0\t DECOMMISSIONED\t       host1:8888\t");
pw.println("                           0");
pw.print("         host0:0\t       REBOOTED\t       host1:8888\t");
pw.println("                           0");
pw.print("         host0:0\t           LOST\t       host1:8888\t");
pw.println("                           0");
pw.close();
nodesReportStr=baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr,sysOutStream.toString());
verify(sysOut,times(9)).write(any(byte[].class),anyInt(),anyInt());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that "applicationattempt -help" prints the usage text and exits 0,
// and that malformed invocations (extra trailing args) produce the same help
// output on stdout.
@Test(timeout=10000) public void testAppAttemptsHelpCommand() throws Exception {
ApplicationCLI cli=createAndGetAppCLI();
ApplicationCLI spyCli=spy(cli);
int result=spyCli.run(new String[]{"applicationattempt","-help"});
Assert.assertTrue(result == 0);
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createApplicationAttemptCLIHelpMessage(),sysOutStream.toString());
sysOutStream.reset();
ApplicationId applicationId=ApplicationId.newInstance(1234,5);
// NOTE(review): these runs use 'cli', not 'spyCli', so each verify() below
// still checks the one printUsage() call recorded on the spy above; the
// bad-arg paths are only asserted via stdout. 'result' is reassigned but
// never checked — confirm whether it should be asserted (e.g. == -1).
result=cli.run(new String[]{"applicationattempt","-list",applicationId.toString(),"args"});
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createApplicationAttemptCLIHelpMessage(),sysOutStream.toString());
sysOutStream.reset();
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,6);
result=cli.run(new String[]{"applicationattempt","-status",appAttemptId.toString(),"args"});
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createApplicationAttemptCLIHelpMessage(),sysOutStream.toString());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * "applicationattempt -status <attemptId>" should fetch the attempt report
 * from the client and print it to stdout in the expected layout.
 */
@Test public void testGetApplicationAttemptReport() throws Exception {
ApplicationCLI appCli=createAndGetAppCLI();
ApplicationId appId=ApplicationId.newInstance(1234,5);
ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
ApplicationAttemptReport attemptReport=ApplicationAttemptReport.newInstance(attemptId,"host",124,"url","diagnostics",YarnApplicationAttemptState.FINISHED,ContainerId.newInstance(attemptId,1));
when(client.getApplicationAttemptReport(any(ApplicationAttemptId.class))).thenReturn(attemptReport);
int exitCode=appCli.run(new String[]{"applicationattempt","-status",attemptId.toString()});
assertEquals(0,exitCode);
verify(client).getApplicationAttemptReport(attemptId);
// Build the expected report line by line; println() emits the same platform
// line separator the CLI itself uses.
String[] expectedLines={"Application Attempt Report : ","\tApplicationAttempt-Id : appattempt_1234_0005_000001","\tState : FINISHED","\tAMContainer : container_1234_0005_01_000001","\tTracking-URL : url","\tRPC Port : 124","\tAM Host : host","\tDiagnostics : diagnostics"};
ByteArrayOutputStream expectedBytes=new ByteArrayOutputStream();
PrintWriter writer=new PrintWriter(expectedBytes);
for ( String expectedLine : expectedLines) {
writer.println(expectedLine);
}
writer.close();
Assert.assertEquals(expectedBytes.toString("UTF-8"),sysOutStream.toString());
// The whole report is emitted via a single println on the wrapped stream.
verify(sysOut,times(1)).println(isA(String.class));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Exercises "application -list" with various -appTypes / --appStates filter
 * combinations (including messy whitespace/empty tokens, the ALL pseudo-state,
 * lower-case states, and an invalid state) and compares the CLI's stdout with
 * a hand-built expected report.
 *
 * Fix: the verify() for the combined-filter scenario previously re-verified
 * scenario 2's arguments (appType2/appState2) instead of scenario 4's
 * (appType4/appState4), so scenario 4's client interaction was never checked.
 * Raw List/Set/EnumSet declarations are also parameterized.
 */
@Test public void testGetApplications() throws Exception {
ApplicationCLI cli=createAndGetAppCLI();
// Fixture: six reports covering different types and states.
ApplicationId applicationId=ApplicationId.newInstance(1234,5);
ApplicationReport newApplicationReport=ApplicationReport.newInstance(applicationId,ApplicationAttemptId.newInstance(applicationId,1),"user","queue","appname","host",124,null,YarnApplicationState.RUNNING,"diagnostics","url",0,0,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.53789f,"YARN",null);
List<ApplicationReport> applicationReports=new ArrayList<ApplicationReport>();
applicationReports.add(newApplicationReport);
ApplicationId applicationId2=ApplicationId.newInstance(1234,6);
ApplicationReport newApplicationReport2=ApplicationReport.newInstance(applicationId2,ApplicationAttemptId.newInstance(applicationId2,2),"user2","queue2","appname2","host2",125,null,YarnApplicationState.FINISHED,"diagnostics2","url2",2,2,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.63789f,"NON-YARN",null);
applicationReports.add(newApplicationReport2);
ApplicationId applicationId3=ApplicationId.newInstance(1234,7);
ApplicationReport newApplicationReport3=ApplicationReport.newInstance(applicationId3,ApplicationAttemptId.newInstance(applicationId3,3),"user3","queue3","appname3","host3",126,null,YarnApplicationState.RUNNING,"diagnostics3","url3",3,3,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.73789f,"MAPREDUCE",null);
applicationReports.add(newApplicationReport3);
ApplicationId applicationId4=ApplicationId.newInstance(1234,8);
ApplicationReport newApplicationReport4=ApplicationReport.newInstance(applicationId4,ApplicationAttemptId.newInstance(applicationId4,4),"user4","queue4","appname4","host4",127,null,YarnApplicationState.FAILED,"diagnostics4","url4",4,4,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.83789f,"NON-MAPREDUCE",null);
applicationReports.add(newApplicationReport4);
ApplicationId applicationId5=ApplicationId.newInstance(1234,9);
ApplicationReport newApplicationReport5=ApplicationReport.newInstance(applicationId5,ApplicationAttemptId.newInstance(applicationId5,5),"user5","queue5","appname5","host5",128,null,YarnApplicationState.ACCEPTED,"diagnostics5","url5",5,5,FinalApplicationStatus.KILLED,null,"N/A",0.93789f,"HIVE",null);
applicationReports.add(newApplicationReport5);
ApplicationId applicationId6=ApplicationId.newInstance(1234,10);
ApplicationReport newApplicationReport6=ApplicationReport.newInstance(applicationId6,ApplicationAttemptId.newInstance(applicationId6,6),"user6","queue6","appname6","host6",129,null,YarnApplicationState.SUBMITTED,"diagnostics6","url6",6,6,FinalApplicationStatus.KILLED,null,"N/A",0.99789f,"PIG",null);
applicationReports.add(newApplicationReport6);
// Scenario 1: plain -list defaults to the "active" states, no type filter.
Set<String> appType1=new HashSet<String>();
EnumSet<YarnApplicationState> appState1=EnumSet.noneOf(YarnApplicationState.class);
appState1.add(YarnApplicationState.RUNNING);
appState1.add(YarnApplicationState.ACCEPTED);
appState1.add(YarnApplicationState.SUBMITTED);
when(client.getApplications(appType1,appState1)).thenReturn(getApplicationReports(applicationReports,appType1,appState1,false));
int result=cli.run(new String[]{"application","-list"});
assertEquals(0,result);
verify(client).getApplications(appType1,appState1);
ByteArrayOutputStream baos=new ByteArrayOutputStream();
PrintWriter pw=new PrintWriter(baos);
pw.println("Total number of applications (application-types: " + appType1 + " and states: "+ appState1+ ")"+ ":"+ 4);
pw.print("                Application-Id\t    Application-Name");
pw.print("\t    Application-Type");
pw.print("\t      User\t     Queue\t             State\t       ");
pw.print("Final-State\t       Progress");
pw.println("\t                       Tracking-URL");
pw.print("         application_1234_0005\t             ");
pw.print("appname\t                YARN\t      user\t     ");
pw.print("queue\t           RUNNING\t         ");
pw.print("SUCCEEDED\t         53.79%");
pw.println("\t                                N/A");
pw.print("         application_1234_0007\t            ");
pw.print("appname3\t           MAPREDUCE\t     user3\t    ");
pw.print("queue3\t           RUNNING\t         ");
pw.print("SUCCEEDED\t         73.79%");
pw.println("\t                                N/A");
pw.print("         application_1234_0009\t            ");
pw.print("appname5\t                HIVE\t     user5\t    ");
pw.print("queue5\t          ACCEPTED\t            ");
pw.print("KILLED\t         93.79%");
pw.println("\t                                N/A");
pw.print("         application_1234_0010\t            ");
pw.print("appname6\t                 PIG\t     user6\t    ");
pw.print("queue6\t         SUBMITTED\t            ");
pw.print("KILLED\t         99.79%");
pw.println("\t                                N/A");
pw.close();
String appsReportStr=baos.toString("UTF-8");
Assert.assertEquals(appsReportStr,sysOutStream.toString());
verify(sysOut,times(1)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 2: -appTypes with messy separators; empty tokens must be ignored.
Set<String> appType2=new HashSet<String>();
appType2.add("YARN");
appType2.add("NON-YARN");
EnumSet<YarnApplicationState> appState2=EnumSet.noneOf(YarnApplicationState.class);
appState2.add(YarnApplicationState.RUNNING);
appState2.add(YarnApplicationState.ACCEPTED);
appState2.add(YarnApplicationState.SUBMITTED);
when(client.getApplications(appType2,appState2)).thenReturn(getApplicationReports(applicationReports,appType2,appState2,false));
result=cli.run(new String[]{"application","-list","-appTypes","YARN, ,,  NON-YARN","   ,, ,,"});
assertEquals(0,result);
verify(client).getApplications(appType2,appState2);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total number of applications (application-types: " + appType2 + " and states: "+ appState2+ ")"+ ":"+ 1);
pw.print("                Application-Id\t    Application-Name");
pw.print("\t    Application-Type");
pw.print("\t      User\t     Queue\t             State\t       ");
pw.print("Final-State\t       Progress");
pw.println("\t                       Tracking-URL");
pw.print("         application_1234_0005\t             ");
pw.print("appname\t                YARN\t      user\t     ");
pw.print("queue\t           RUNNING\t         ");
pw.print("SUCCEEDED\t         53.79%");
pw.println("\t                                N/A");
pw.close();
appsReportStr=baos.toString("UTF-8");
Assert.assertEquals(appsReportStr,sysOutStream.toString());
verify(sysOut,times(2)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 3: --appStates with messy separators and a duplicate state.
Set<String> appType3=new HashSet<String>();
EnumSet<YarnApplicationState> appState3=EnumSet.noneOf(YarnApplicationState.class);
appState3.add(YarnApplicationState.FINISHED);
appState3.add(YarnApplicationState.FAILED);
when(client.getApplications(appType3,appState3)).thenReturn(getApplicationReports(applicationReports,appType3,appState3,false));
result=cli.run(new String[]{"application","-list","--appStates","FINISHED ,, , FAILED",",,FINISHED"});
assertEquals(0,result);
verify(client).getApplications(appType3,appState3);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total number of applications (application-types: " + appType3 + " and states: "+ appState3+ ")"+ ":"+ 2);
pw.print("                Application-Id\t    Application-Name");
pw.print("\t    Application-Type");
pw.print("\t      User\t     Queue\t             State\t       ");
pw.print("Final-State\t       Progress");
pw.println("\t                       Tracking-URL");
pw.print("         application_1234_0006\t            ");
pw.print("appname2\t            NON-YARN\t     user2\t    ");
pw.print("queue2\t          FINISHED\t         ");
pw.print("SUCCEEDED\t         63.79%");
pw.println("\t                                N/A");
pw.print("         application_1234_0008\t            ");
pw.print("appname4\t       NON-MAPREDUCE\t     user4\t    ");
pw.print("queue4\t            FAILED\t         ");
pw.print("SUCCEEDED\t         83.79%");
pw.println("\t                                N/A");
pw.close();
appsReportStr=baos.toString("UTF-8");
Assert.assertEquals(appsReportStr,sysOutStream.toString());
verify(sysOut,times(3)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 4: type filter and state filter combined.
Set<String> appType4=new HashSet<String>();
appType4.add("YARN");
appType4.add("NON-YARN");
EnumSet<YarnApplicationState> appState4=EnumSet.noneOf(YarnApplicationState.class);
appState4.add(YarnApplicationState.FINISHED);
appState4.add(YarnApplicationState.FAILED);
when(client.getApplications(appType4,appState4)).thenReturn(getApplicationReports(applicationReports,appType4,appState4,false));
result=cli.run(new String[]{"application","-list","--appTypes","YARN,NON-YARN","--appStates","FINISHED ,, , FAILED"});
assertEquals(0,result);
// Fixed: verify this scenario's arguments (was appType2/appState2, which
// merely re-verified scenario 2's interaction).
verify(client).getApplications(appType4,appState4);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total number of applications (application-types: " + appType4 + " and states: "+ appState4+ ")"+ ":"+ 1);
pw.print("                Application-Id\t    Application-Name");
pw.print("\t    Application-Type");
pw.print("\t      User\t     Queue\t             State\t       ");
pw.print("Final-State\t       Progress");
pw.println("\t                       Tracking-URL");
pw.print("         application_1234_0006\t            ");
pw.print("appname2\t            NON-YARN\t     user2\t    ");
pw.print("queue2\t          FINISHED\t         ");
pw.print("SUCCEEDED\t         63.79%");
pw.println("\t                                N/A");
pw.close();
appsReportStr=baos.toString("UTF-8");
Assert.assertEquals(appsReportStr,sysOutStream.toString());
verify(sysOut,times(4)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 5: an invalid state aborts with -1 and an explanatory message.
result=cli.run(new String[]{"application","-list","--appStates","FINISHED ,, , INVALID"});
assertEquals(-1,result);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("The application state  INVALID is invalid.");
pw.print("The valid application state can be one of the following: ");
StringBuilder sb=new StringBuilder();
sb.append("ALL,");
for ( YarnApplicationState state : YarnApplicationState.values()) {
sb.append(state + ",");
}
String output=sb.toString();
pw.println(output.substring(0,output.length() - 1));
pw.close();
appsReportStr=baos.toString("UTF-8");
Assert.assertEquals(appsReportStr,sysOutStream.toString());
// Still times(4): the error path prints via println, not the table writer.
verify(sysOut,times(4)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 6: the ALL pseudo-state lists every application.
Set<String> appType5=new HashSet<String>();
EnumSet<YarnApplicationState> appState5=EnumSet.noneOf(YarnApplicationState.class);
appState5.add(YarnApplicationState.FINISHED);
when(client.getApplications(appType5,appState5)).thenReturn(getApplicationReports(applicationReports,appType5,appState5,true));
result=cli.run(new String[]{"application","-list","--appStates","FINISHED ,, , ALL"});
assertEquals(0,result);
verify(client).getApplications(appType5,appState5);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total number of applications (application-types: " + appType5 + " and states: "+ appState5+ ")"+ ":"+ 6);
pw.print("                Application-Id\t    Application-Name");
pw.print("\t    Application-Type");
pw.print("\t      User\t     Queue\t             State\t       ");
pw.print("Final-State\t       Progress");
pw.println("\t                       Tracking-URL");
pw.print("         application_1234_0005\t             ");
pw.print("appname\t                YARN\t      user\t     ");
pw.print("queue\t           RUNNING\t         ");
pw.print("SUCCEEDED\t         53.79%");
pw.println("\t                                N/A");
pw.print("         application_1234_0006\t            ");
pw.print("appname2\t            NON-YARN\t     user2\t    ");
pw.print("queue2\t          FINISHED\t         ");
pw.print("SUCCEEDED\t         63.79%");
pw.println("\t                                N/A");
pw.print("         application_1234_0007\t            ");
pw.print("appname3\t           MAPREDUCE\t     user3\t    ");
pw.print("queue3\t           RUNNING\t         ");
pw.print("SUCCEEDED\t         73.79%");
pw.println("\t                                N/A");
pw.print("         application_1234_0008\t            ");
pw.print("appname4\t       NON-MAPREDUCE\t     user4\t    ");
pw.print("queue4\t            FAILED\t         ");
pw.print("SUCCEEDED\t         83.79%");
pw.println("\t                                N/A");
pw.print("         application_1234_0009\t            ");
pw.print("appname5\t                HIVE\t     user5\t    ");
pw.print("queue5\t          ACCEPTED\t            ");
pw.print("KILLED\t         93.79%");
pw.println("\t                                N/A");
pw.print("         application_1234_0010\t            ");
pw.print("appname6\t                 PIG\t     user6\t    ");
pw.print("queue6\t         SUBMITTED\t            ");
pw.print("KILLED\t         99.79%");
pw.println("\t                                N/A");
pw.close();
appsReportStr=baos.toString("UTF-8");
Assert.assertEquals(appsReportStr,sysOutStream.toString());
verify(sysOut,times(5)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 7: state names are case-insensitive ("finished").
Set<String> appType6=new HashSet<String>();
appType6.add("YARN");
appType6.add("NON-YARN");
EnumSet<YarnApplicationState> appState6=EnumSet.noneOf(YarnApplicationState.class);
appState6.add(YarnApplicationState.FINISHED);
when(client.getApplications(appType6,appState6)).thenReturn(getApplicationReports(applicationReports,appType6,appState6,false));
result=cli.run(new String[]{"application","-list","-appTypes","YARN, ,,  NON-YARN","--appStates","finished"});
assertEquals(0,result);
verify(client).getApplications(appType6,appState6);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total number of applications (application-types: " + appType6 + " and states: "+ appState6+ ")"+ ":"+ 1);
pw.print("                Application-Id\t    Application-Name");
pw.print("\t    Application-Type");
pw.print("\t      User\t     Queue\t             State\t       ");
pw.print("Final-State\t       Progress");
pw.println("\t                       Tracking-URL");
pw.print("         application_1234_0006\t            ");
pw.print("appname2\t            NON-YARN\t     user2\t    ");
pw.print("queue2\t          FINISHED\t         ");
pw.print("SUCCEEDED\t         63.79%");
pw.println("\t                                N/A");
pw.close();
appsReportStr=baos.toString("UTF-8");
Assert.assertEquals(appsReportStr,sysOutStream.toString());
verify(sysOut,times(6)).write(any(byte[].class),anyInt(),anyInt());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * "application -status <appId>" should fetch the application report from the
 * client and print it to stdout in the expected layout.
 */
@Test public void testGetApplicationReport() throws Exception {
ApplicationCLI appCli=createAndGetAppCLI();
ApplicationId appId=ApplicationId.newInstance(1234,5);
ApplicationReport report=ApplicationReport.newInstance(appId,ApplicationAttemptId.newInstance(appId,1),"user","queue","appname","host",124,null,YarnApplicationState.FINISHED,"diagnostics","url",0,0,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.53789f,"YARN",null);
when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(report);
int exitCode=appCli.run(new String[]{"application","-status",appId.toString()});
assertEquals(0,exitCode);
verify(client).getApplicationReport(appId);
// Build the expected report line by line; println() emits the same platform
// line separator the CLI itself uses.
String[] expectedLines={"Application Report : ","\tApplication-Id : application_1234_0005","\tApplication-Name : appname","\tApplication-Type : YARN","\tUser : user","\tQueue : queue","\tStart-Time : 0","\tFinish-Time : 0","\tProgress : 53.79%","\tState : FINISHED","\tFinal-State : SUCCEEDED","\tTracking-URL : N/A","\tRPC Port : 124","\tAM Host : host","\tDiagnostics : diagnostics"};
ByteArrayOutputStream expectedBytes=new ByteArrayOutputStream();
PrintWriter writer=new PrintWriter(expectedBytes);
for ( String expectedLine : expectedLines) {
writer.println(expectedLine);
}
writer.close();
Assert.assertEquals(expectedBytes.toString("UTF-8"),sysOutStream.toString());
// The whole report is emitted via a single println on the wrapped stream.
verify(sysOut,times(1)).println(isA(String.class));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * "container -status <containerId>" should fetch the container report from
 * the client and print it to stdout in the expected layout.
 */
@Test public void testGetContainerReport() throws Exception {
ApplicationCLI appCli=createAndGetAppCLI();
ApplicationId appId=ApplicationId.newInstance(1234,5);
ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
ContainerId containerId=ContainerId.newInstance(attemptId,1);
ContainerReport report=ContainerReport.newInstance(containerId,null,NodeId.newInstance("host",1234),Priority.UNDEFINED,1234,5678,"diagnosticInfo","logURL",0,ContainerState.COMPLETE);
when(client.getContainerReport(any(ContainerId.class))).thenReturn(report);
int exitCode=appCli.run(new String[]{"container","-status",containerId.toString()});
assertEquals(0,exitCode);
verify(client).getContainerReport(containerId);
// Build the expected report line by line; println() emits the same platform
// line separator the CLI itself uses.
String[] expectedLines={"Container Report : ","\tContainer-Id : container_1234_0005_01_000001","\tStart-Time : 1234","\tFinish-Time : 5678","\tState : COMPLETE","\tLOG-URL : logURL","\tHost : host:1234","\tDiagnostics : diagnosticInfo"};
ByteArrayOutputStream expectedBytes=new ByteArrayOutputStream();
PrintWriter writer=new PrintWriter(expectedBytes);
for ( String expectedLine : expectedLines) {
writer.println(expectedLine);
}
writer.close();
Assert.assertEquals(expectedBytes.toString("UTF-8"),sysOutStream.toString());
// The whole report is emitted via a single println on the wrapped stream.
verify(sysOut,times(1)).println(isA(String.class));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Walks HAUtil.verifyAndSetConfiguration(conf) through a valid config plus a
// series of invalid ones, checking the exception message produced by each
// validation step.
//
// NOTE(review): in the negative cases below (single RM id, missing RM_HA_ID,
// invalid id, untrimmed id) there is no fail() after the call, so if
// verifyAndSetConfiguration does NOT throw, the catch body is skipped and the
// case silently passes. Confirm whether fail() should be added as in the
// Configuration#set() case.
@Test public void testVerifyAndSetConfiguration() throws Exception {
// Valid configuration: must not throw, and must trim ids/addresses.
try {
HAUtil.verifyAndSetConfiguration(conf);
}
catch ( YarnRuntimeException e) {
fail("Should not throw any exceptions.");
}
assertEquals("Should be saved as Trimmed collection",StringUtils.getStringCollection(RM_NODE_IDS),HAUtil.getRMHAIds(conf));
assertEquals("Should be saved as Trimmed string",RM1_NODE_ID,HAUtil.getRMHAId(conf));
for ( String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
assertEquals("RPC address not set for " + confKey,RM1_ADDRESS,conf.get(confKey));
}
// Only one RM id configured: HA needs at least two.
conf.clear();
conf.set(YarnConfiguration.RM_HA_IDS,RM1_NODE_ID);
try {
HAUtil.verifyAndSetConfiguration(conf);
}
catch ( YarnRuntimeException e) {
// NOTE(review): the "\nHA mode requires atleast two RMs" suffix is
// concatenated onto the conf value INSIDE the getInvalidValueMessage()
// argument — confirm that matches how HAUtil builds the real message.
assertEquals("YarnRuntimeException by verifyAndSetRMHAIds()",HAUtil.BAD_CONFIG_MESSAGE_PREFIX + HAUtil.getInvalidValueMessage(YarnConfiguration.RM_HA_IDS,conf.get(YarnConfiguration.RM_HA_IDS) + "\nHA mode requires atleast two RMs"),e.getMessage());
}
// RM_HA_ID not set at all.
conf.clear();
conf.set(YarnConfiguration.RM_HA_IDS,RM1_NODE_ID + "," + RM2_NODE_ID);
for ( String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
conf.set(HAUtil.addSuffix(confKey,RM1_NODE_ID),RM1_ADDRESS);
conf.set(HAUtil.addSuffix(confKey,RM2_NODE_ID),RM2_ADDRESS);
}
try {
HAUtil.verifyAndSetConfiguration(conf);
}
catch ( YarnRuntimeException e) {
assertEquals("YarnRuntimeException by getRMId()",HAUtil.BAD_CONFIG_MESSAGE_PREFIX + HAUtil.getNeedToSetValueMessage(YarnConfiguration.RM_HA_ID),e.getMessage());
}
// RM_HA_ID set to an invalid (e.g. whitespace-containing) id.
conf.clear();
conf.set(YarnConfiguration.RM_HA_ID,RM_INVALID_NODE_ID);
conf.set(YarnConfiguration.RM_HA_IDS,RM_INVALID_NODE_ID + "," + RM1_NODE_ID);
for ( String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
conf.set(confKey + RM_INVALID_NODE_ID,RM_INVALID_NODE_ID);
}
try {
HAUtil.verifyAndSetConfiguration(conf);
}
catch ( YarnRuntimeException e) {
assertEquals("YarnRuntimeException by addSuffix()",HAUtil.BAD_CONFIG_MESSAGE_PREFIX + HAUtil.getInvalidValueMessage(YarnConfiguration.RM_HA_ID,RM_INVALID_NODE_ID),e.getMessage());
}
// Service addresses missing for the configured RM id: must throw.
conf.clear();
conf.set(YarnConfiguration.RM_HA_ID,RM1_NODE_ID);
conf.set(YarnConfiguration.RM_HA_IDS,RM1_NODE_ID + "," + RM2_NODE_ID);
try {
HAUtil.verifyAndSetConfiguration(conf);
fail("Should throw YarnRuntimeException. by Configuration#set()");
}
catch ( YarnRuntimeException e) {
String confKey=HAUtil.addSuffix(YarnConfiguration.RM_ADDRESS,RM1_NODE_ID);
assertEquals("YarnRuntimeException by Configuration#set()",HAUtil.BAD_CONFIG_MESSAGE_PREFIX + HAUtil.getNeedToSetValueMessage(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME,RM1_NODE_ID) + " or " + confKey),e.getMessage());
}
// RM_HA_ID (untrimmed rm1) is not a member of RM_HA_IDS ([rm2, rm3]).
conf.clear();
conf.set(YarnConfiguration.RM_HA_IDS,RM2_NODE_ID + "," + RM3_NODE_ID);
conf.set(YarnConfiguration.RM_HA_ID,RM1_NODE_ID_UNTRIMMED);
for ( String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
conf.set(HAUtil.addSuffix(confKey,RM1_NODE_ID),RM1_ADDRESS_UNTRIMMED);
conf.set(HAUtil.addSuffix(confKey,RM2_NODE_ID),RM2_ADDRESS);
conf.set(HAUtil.addSuffix(confKey,RM3_NODE_ID),RM3_ADDRESS);
}
try {
HAUtil.verifyAndSetConfiguration(conf);
}
catch ( YarnRuntimeException e) {
assertEquals("YarnRuntimeException by getRMId()'s validation",HAUtil.BAD_CONFIG_MESSAGE_PREFIX + HAUtil.getRMHAIdNeedToBeIncludedMessage("[rm2, rm3]",RM1_NODE_ID),e.getMessage());
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * getRMHAIds() must return both configured RM ids in their configured order.
 */
@Test public void testGetRMServiceId() throws Exception {
conf.set(YarnConfiguration.RM_HA_IDS,RM1_NODE_ID + "," + RM2_NODE_ID);
Collection rmhaIds=HAUtil.getRMHAIds(conf);
assertEquals(2,rmhaIds.size());
// Walk the collection in iteration order and match against the expected ids.
String[] expectedIds={RM1_NODE_ID,RM2_NODE_ID};
int index=0;
for ( Object rmhaId : rmhaIds) {
assertEquals(expectedIds[index++],rmhaId);
}
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * When yarn.resourcemanager.webapp.address is set explicitly, the resolved RM
 * web URL must use its port rather than anything derived from the RM RPC
 * address.
 */
@Test public void testRMWebUrlSpecified() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS,"fortesting:24543");
conf.set(YarnConfiguration.RM_ADDRESS,"rmtesting:9999");
String rmWebUrl=WebAppUtils.getRMWebAppURLWithScheme(conf);
// The port is the last ':'-separated component of the URL.
String[] parts=rmWebUrl.split(":");
// Fixed typo in the assertion message ("incrrect") and simplified the
// Integer.valueOf(...).intValue() round-trip to Integer.parseInt.
Assert.assertEquals("RM Web URL Port is incorrect",24543,Integer.parseInt(parts[parts.length - 1]));
// NOTE(review): assertNotSame compares references, so two distinct-but-equal
// Strings always pass; if value inequality is intended, this should probably
// be an equality-based assertion — confirm before changing.
Assert.assertNotSame("RM Web Url not resolved correctly. Should not be rmtesting","http://rmtesting:24543",rmWebUrl);
}
APIUtilityVerifier IdentityVerifier
/**
 * With an untouched configuration the resolved RM web URL should not be the
 * raw default bind address.
 * NOTE(review): assertNotSame is a reference comparison, so this assertion is
 * effectively always true for a freshly built String — confirm intent.
 */
@Test public void testDefaultRMWebUrl() throws Exception {
YarnConfiguration defaultConf=new YarnConfiguration();
String resolvedUrl=WebAppUtils.getRMWebAppURLWithScheme(defaultConf);
Assert.assertNotSame("RM Web Url is not correct","http://0.0.0.0:8088",resolvedUrl);
}
TestCleaner APIUtilityVerifier BranchVerifier BooleanVerifier HybridVerifier
/**
 * Shuts down the in-process ZooKeeper server after each test: closes the ZK
 * database, stops the connection factory, and waits until the port stops
 * answering so the next test can rebind it.
 */
@After public void tearDown() throws IOException, InterruptedException {
if (zks != null) {
ZKDatabase zkDb=zks.getZKDatabase();
factory.shutdown();
try {
zkDb.close();
}
catch ( IOException ignored) {
// Best-effort close during teardown; the server is going away regardless.
}
final int PORT=Integer.parseInt(hostPort.split(":")[1]);
Assert.assertTrue("waiting for server down",waitForServerDown("127.0.0.1:" + PORT,CONNECTION_TIMEOUT));
}
}
APIUtilityVerifier TestInitializer BooleanVerifier HybridVerifier
// Starts an in-process ZooKeeper server on the port parsed from hostPort,
// reusing the NIO connection factory across tests, and blocks until the
// server answers.
@Before public void setUp() throws IOException, InterruptedException {
// Keep transaction-log preallocation small so test startup stays cheap.
System.setProperty("zookeeper.preAllocSize","100");
FileTxnLog.setPreallocSize(100 * 1024);
if (!BASETEST.exists()) {
// NOTE(review): mkdirs() return value is ignored; a failure here surfaces
// later as an I/O error from createTmpDir — confirm that is acceptable.
BASETEST.mkdirs();
}
File dataDir=createTmpDir(BASETEST);
// Same directory serves as both snapshot and txn-log dir; 3000ms tick time.
zks=new ZooKeeperServer(dataDir,dataDir,3000);
final int PORT=Integer.parseInt(hostPort.split(":")[1]);
if (factory == null) {
factory=new NIOServerCnxnFactory();
factory.configure(new InetSocketAddress(PORT),maxCnxns);
}
factory.startup(zks);
Assert.assertTrue("waiting for server up",waitForServerUp("127.0.0.1:" + PORT,CONNECTION_TIMEOUT));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier
/**
 * Writes stdout/stderr source files owned by the current user, then runs
 * aggregation while {@code LogValue} first reports a bogus owner. Exactly
 * one file must fail the ownership check and be replaced in the aggregated
 * log by an owner-mismatch message; the other must be aggregated verbatim.
 */
@Test(timeout=10000) public void testContainerLogsFileAccess() throws IOException {
  // Ownership checks require native IO support on this platform.
  Assume.assumeTrue(NativeIO.isAvailable());
  Configuration conf=new Configuration();
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
  UserGroupInformation.setConfiguration(conf);
  File workDir=new File(testWorkDir,"testContainerLogsFileAccess1");
  Path remoteAppLogFile=new Path(workDir.getAbsolutePath(),"aggregatedLogFile");
  Path srcFileRoot=new Path(workDir.getAbsolutePath(),"srcFiles");
  String data="Log File content for container : ";
  ApplicationId applicationId=ApplicationId.newInstance(1,1);
  ApplicationAttemptId applicationAttemptId=ApplicationAttemptId.newInstance(applicationId,1);
  ContainerId testContainerId1=ContainerId.newInstance(applicationAttemptId,1);
  Path appDir=new Path(srcFileRoot,testContainerId1.getApplicationAttemptId().getApplicationId().toString());
  Path srcFilePath1=new Path(appDir,testContainerId1.toString());
  String stdout="stdout";
  String stderr="stderr";
  writeSrcFile(srcFilePath1,stdout,data + testContainerId1.toString() + stdout);
  writeSrcFile(srcFilePath1,stderr,data + testContainerId1.toString() + stderr);
  UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
  LogWriter logWriter=new LogWriter(conf,remoteAppLogFile,ugi);
  LogKey logKey=new LogKey(testContainerId1);
  String randomUser="randomUser";
  LogValue logValue=spy(new LogValue(Collections.singletonList(srcFileRoot.toString()),testContainerId1,randomUser));
  // First getUser() call returns the bogus owner, the second the real one,
  // so exactly one of the two files fails the ownership check.
  when(logValue.getUser()).thenReturn(randomUser).thenReturn(ugi.getShortUserName());
  logWriter.append(logKey,logValue);
  logWriter.close();
  // FIX: the BufferedReader was previously leaked (never closed); read the
  // aggregated file with try-with-resources. Also use StringBuilder — no
  // synchronization is needed for this local accumulator.
  StringBuilder sb=new StringBuilder();
  try (BufferedReader in=new BufferedReader(new FileReader(new File(remoteAppLogFile.toUri().getRawPath())))) {
    String line;
    while ((line=in.readLine()) != null) {
      LOG.info(line);
      sb.append(line);
    }
  }
  String aggregated=sb.toString();
  String expectedOwner=ugi.getShortUserName();
  // On Windows a member of the Administrators group creates files owned by
  // the group, so adjust the expected owner accordingly.
  if (Path.WINDOWS) {
    final String adminsGroupString="Administrators";
    if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) {
      expectedOwner=adminsGroupString;
    }
  }
  // FIX: renamed locals — the original "stdoutFile1" actually held the
  // stderr path, which was actively misleading.
  String stderrFile=StringUtils.join(File.separator,Arrays.asList(new String[]{workDir.getAbsolutePath(),"srcFiles",testContainerId1.getApplicationAttemptId().getApplicationId().toString(),testContainerId1.toString(),stderr}));
  String message1="Owner '" + expectedOwner + "' for path "+ stderrFile+ " did not match expected owner '"+ randomUser+ "'";
  String stdoutFile=StringUtils.join(File.separator,Arrays.asList(new String[]{workDir.getAbsolutePath(),"srcFiles",testContainerId1.getApplicationAttemptId().getApplicationId().toString(),testContainerId1.toString(),stdout}));
  String message2="Owner '" + expectedOwner + "' for path "+ stdoutFile+ " did not match expected owner '"+ ugi.getShortUserName()+ "'";
  // stderr must be rejected: mismatch message present, content absent.
  Assert.assertTrue(aggregated.contains(message1));
  Assert.assertFalse(aggregated.contains(message2));
  Assert.assertFalse(aggregated.contains(data + testContainerId1.toString() + stderr));
  // stdout must be aggregated normally.
  Assert.assertTrue(aggregated.contains(data + testContainerId1.toString() + stdout));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips one container's stdout through log aggregation and verifies
 * the aggregated file's permissions, header fields, content, and exact
 * on-disk length.
 */
@Test public void testReadAcontainerLogs1() throws Exception {
Configuration conf=new Configuration();
File workDir=new File(testWorkDir,"testReadAcontainerLogs1");
Path remoteAppLogFile=new Path(workDir.getAbsolutePath(),"aggregatedLogFile");
Path srcFileRoot=new Path(workDir.getAbsolutePath(),"srcFiles");
ContainerId testContainerId=TestContainerId.newContainerId(1,1,1,1);
// Source layout mirrors aggregation's expectation: <root>/<appId>/<containerId>.
Path t=new Path(srcFileRoot,testContainerId.getApplicationAttemptId().getApplicationId().toString());
Path srcFilePath=new Path(t,testContainerId.toString());
int numChars=80000;
writeSrcFile(srcFilePath,"stdout",numChars);
UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
LogWriter logWriter=new LogWriter(conf,remoteAppLogFile,ugi);
LogKey logKey=new LogKey(testContainerId);
LogValue logValue=new LogValue(Collections.singletonList(srcFileRoot.toString()),testContainerId,ugi.getShortUserName());
logWriter.append(logKey,logValue);
logWriter.close();
// The aggregated file must be created with restrictive 0640 permissions.
FileStatus fsStatus=fs.getFileStatus(remoteAppLogFile);
Assert.assertEquals("permissions on log aggregation file are wrong",FsPermission.createImmutable((short)0640),fsStatus.getPermission());
LogReader logReader=new LogReader(conf,remoteAppLogFile);
LogKey rLogKey=new LogKey();
DataInputStream dis=logReader.next(rLogKey);
Writer writer=new StringWriter();
LogReader.readAcontainerLogs(dis,writer);
String s=writer.toString();
// Expected length = framing headers ("LogType", "LogLength", "Log Contents")
// plus the payload itself; must match readAcontainerLogs' output format exactly.
int expectedLength="\n\nLogType:stdout".length() + ("\nLogLength:" + numChars).length() + "\nLog Contents:\n".length()+ numChars;
Assert.assertTrue("LogType not matched",s.contains("LogType:stdout"));
Assert.assertTrue("LogLength not matched",s.contains("LogLength:" + numChars));
Assert.assertTrue("Log Contents not matched",s.contains("Log Contents"));
// Reconstruct the payload: writeSrcFile presumably repeats `filler`
// numChars times — TODO confirm against the helper's implementation.
StringBuilder sb=new StringBuilder();
for (int i=0; i < numChars; i++) {
sb.append(filler);
}
String expectedContent=sb.toString();
Assert.assertTrue("Log content incorrect",s.contains(expectedContent));
Assert.assertEquals(expectedLength,s.length());
}
APIUtilityVerifier BooleanVerifier
/**
 * Renders the aggregated-logs block against corrupt/unreadable log data and
 * expects the "logs not available" fallback message in the output.
 * @throws Exception
 */
@Test public void testBadLogs() throws Exception {
  FileUtil.fullyDelete(new File("target/logs"));
  Configuration conf=getConfiguration();
  writeLogs("target/logs/logs/application_0_0001/container_0_0001_01_000001");
  writeLog(conf,"owner");
  AggregatedLogsBlockForTest logsBlock=
      getAggregatedLogsBlockForTest(conf,"admin","container_0_0001_01_000001");
  ByteArrayOutputStream buffer=new ByteArrayOutputStream();
  PrintWriter writer=new PrintWriter(buffer);
  HtmlBlock page=new HtmlBlockForTest();
  HtmlBlock.Block renderTarget=new BlockForTest(page,writer,10,false);
  logsBlock.render(renderTarget);
  renderTarget.getWriter().flush();
  String rendered=buffer.toString();
  assertTrue(rendered.contains("Logs not available for entity. Aggregation may not be complete, Check back later or try the nodemanager at localhost:1234"));
}
APIUtilityVerifier BranchVerifier BooleanVerifier
/**
 * Renders the aggregated-logs block when the log directory exists but is
 * empty (files deleted) and expects a "no logs available" message.
 * @throws Exception
 */
@Test public void testNoLogs() throws Exception {
  FileUtil.fullyDelete(new File("target/logs"));
  Configuration conf=getConfiguration();
  // Create only the (empty) container log directory — no log files.
  File containerDir=new File("target/logs/logs/application_0_0001/container_0_0001_01_000001");
  if (!containerDir.exists()) {
    assertTrue(containerDir.mkdirs());
  }
  writeLog(conf,"admin");
  AggregatedLogsBlockForTest logsBlock=
      getAggregatedLogsBlockForTest(conf,"admin","container_0_0001_01_000001");
  ByteArrayOutputStream buffer=new ByteArrayOutputStream();
  PrintWriter writer=new PrintWriter(buffer);
  HtmlBlock page=new HtmlBlockForTest();
  HtmlBlock.Block renderTarget=new BlockForTest(page,writer,10,false);
  logsBlock.render(renderTarget);
  renderTarget.getWriter().flush();
  String rendered=buffer.toString();
  assertTrue(rendered.contains("No logs available for container container_0_0001_01_000001"));
}
APIUtilityVerifier BooleanVerifier
/**
 * Happy path: with valid aggregated logs present, rendering the block must
 * surface all of the log lines that were written.
 * @throws Exception
 */
@Test public void testAggregatedLogsBlock() throws Exception {
  FileUtil.fullyDelete(new File("target/logs"));
  Configuration conf=getConfiguration();
  writeLogs("target/logs/logs/application_0_0001/container_0_0001_01_000001");
  writeLog(conf,"admin");
  AggregatedLogsBlockForTest logsBlock=
      getAggregatedLogsBlockForTest(conf,"admin","container_0_0001_01_000001");
  ByteArrayOutputStream buffer=new ByteArrayOutputStream();
  PrintWriter writer=new PrintWriter(buffer);
  HtmlBlock page=new HtmlBlockForTest();
  HtmlBlock.Block renderTarget=new BlockForTest(page,writer,10,false);
  logsBlock.render(renderTarget);
  renderTarget.getWriter().flush();
  String rendered=buffer.toString();
  assertTrue(rendered.contains("test log1"));
  assertTrue(rendered.contains("test log2"));
  assertTrue(rendered.contains("test log3"));
}
APIUtilityVerifier BooleanVerifier
/**
 * Authorization check: user 'owner' must be refused access to logs they are
 * not entitled to view, and the rendered block must say so.
 */
@Test public void testAccessDenied() throws Exception {
  FileUtil.fullyDelete(new File("target/logs"));
  Configuration conf=getConfiguration();
  writeLogs("target/logs/logs/application_0_0001/container_0_0001_01_000001");
  writeLog(conf,"owner");
  AggregatedLogsBlockForTest logsBlock=
      getAggregatedLogsBlockForTest(conf,"owner","container_0_0001_01_000001");
  ByteArrayOutputStream buffer=new ByteArrayOutputStream();
  PrintWriter writer=new PrintWriter(buffer);
  HtmlBlock page=new HtmlBlockForTest();
  HtmlBlock.Block renderTarget=new BlockForTest(page,writer,10,false);
  logsBlock.render(renderTarget);
  renderTarget.getWriter().flush();
  String rendered=buffer.toString();
  assertTrue(rendered.contains("User [owner] is not authorized to view the logs for entity"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips a RegisterNodeManagerRequest through its protobuf
 * representation and verifies that every field survives intact.
 */
@Test public void testRegisterNodeManagerRequest(){
  ApplicationId appId=ApplicationId.newInstance(123456789,1);
  ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
  ContainerId containerId=ContainerId.newInstance(attemptId,1);
  NMContainerStatus containerReport=NMContainerStatus.newInstance(containerId,
      ContainerState.RUNNING,Resource.newInstance(1024,1),"diagnostics",0,
      Priority.newInstance(10),1234);
  // FIX: use a parameterized list instead of a raw type.
  List<NMContainerStatus> reports=Arrays.asList(containerReport);
  RegisterNodeManagerRequest request=RegisterNodeManagerRequest.newInstance(
      NodeId.newInstance("1.1.1.1",1000),8080,Resource.newInstance(1024,1),
      "NM-version-id",reports,Arrays.asList(appId));
  // Re-materialize the request from its serialized proto form.
  RegisterNodeManagerRequest requestProto=new RegisterNodeManagerRequestPBImpl(
      ((RegisterNodeManagerRequestPBImpl)request).getProto());
  Assert.assertEquals(containerReport,requestProto.getNMContainerStatuses().get(0));
  Assert.assertEquals(8080,requestProto.getHttpPort());
  Assert.assertEquals("NM-version-id",requestProto.getNMVersion());
  Assert.assertEquals(NodeId.newInstance("1.1.1.1",1000),requestProto.getNodeId());
  Assert.assertEquals(Resource.newInstance(1024,1),requestProto.getResource());
  Assert.assertEquals(1,requestProto.getRunningApplications().size());
  Assert.assertEquals(appId,requestProto.getRunningApplications().get(0));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips an NMContainerStatus through its protobuf representation and
 * checks that every accessor returns the value it was built with.
 */
@Test public void testNMContainerStatus(){
  ApplicationId appId=ApplicationId.newInstance(123456789,1);
  ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
  ContainerId containerId=ContainerId.newInstance(attemptId,1);
  Resource allocated=Resource.newInstance(1000,200);
  NMContainerStatus original=NMContainerStatus.newInstance(containerId,
      ContainerState.COMPLETE,allocated,"diagnostics",
      ContainerExitStatus.ABORTED,Priority.newInstance(10),1234);
  // Rebuild the status from its serialized proto form.
  NMContainerStatus roundTripped=new NMContainerStatusPBImpl(
      ((NMContainerStatusPBImpl)original).getProto());
  Assert.assertEquals("diagnostics",roundTripped.getDiagnostics());
  Assert.assertEquals(allocated,roundTripped.getAllocatedResource());
  Assert.assertEquals(ContainerExitStatus.ABORTED,roundTripped.getContainerExitStatus());
  Assert.assertEquals(ContainerState.COMPLETE,roundTripped.getContainerState());
  Assert.assertEquals(containerId,roundTripped.getContainerId());
  Assert.assertEquals(Priority.newInstance(10),roundTripped.getPriority());
  Assert.assertEquals(1234,roundTripped.getCreationTime());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Builds a RegisterNodeManagerResponse carrying both the container-token
 * and NM-token master keys, then verifies every field both before and
 * after serialization/deserialization via {@code serDe}.
 */
@Test public void testRoundTrip() throws Exception {
  byte[] keyBytes={0,1,2,3,4,5};
  RegisterNodeManagerResponse original=
      recordFactory.newRecordInstance(RegisterNodeManagerResponse.class);
  MasterKey containerKey=recordFactory.newRecordInstance(MasterKey.class);
  containerKey.setKeyId(54321);
  containerKey.setBytes(ByteBuffer.wrap(keyBytes));
  original.setContainerTokenMasterKey(containerKey);
  MasterKey nmKey=recordFactory.newRecordInstance(MasterKey.class);
  nmKey.setKeyId(12345);
  nmKey.setBytes(ByteBuffer.wrap(keyBytes));
  original.setNMTokenMasterKey(nmKey);
  original.setNodeAction(NodeAction.NORMAL);
  // Sanity-check the in-memory record before any serialization.
  assertEquals(NodeAction.NORMAL,original.getNodeAction());
  assertNotNull(original.getContainerTokenMasterKey());
  assertEquals(54321,original.getContainerTokenMasterKey().getKeyId());
  assertArrayEquals(keyBytes,original.getContainerTokenMasterKey().getBytes().array());
  // The container-token key must survive the round trip.
  RegisterNodeManagerResponse copy=serDe(original);
  assertEquals(NodeAction.NORMAL,copy.getNodeAction());
  assertNotNull(copy.getContainerTokenMasterKey());
  assertEquals(54321,copy.getContainerTokenMasterKey().getKeyId());
  assertArrayEquals(keyBytes,copy.getContainerTokenMasterKey().getBytes().array());
  // Same exercise for the NM-token key.
  assertNotNull(original.getNMTokenMasterKey());
  assertEquals(12345,original.getNMTokenMasterKey().getKeyId());
  assertArrayEquals(keyBytes,original.getNMTokenMasterKey().getBytes().array());
  copy=serDe(original);
  assertEquals(NodeAction.NORMAL,copy.getNodeAction());
  assertNotNull(copy.getNMTokenMasterKey());
  assertEquals(12345,copy.getNMTokenMasterKey().getKeyId());
  assertArrayEquals(keyBytes,copy.getNMTokenMasterKey().getBytes().array());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Writes start/finish history for two applications and verifies the client
 * service lists both, in id order.
 */
@Test public void testApplications() throws IOException, YarnException {
  ApplicationId firstApp=ApplicationId.newInstance(0,1);
  writeApplicationStartData(firstApp);
  writeApplicationFinishData(firstApp);
  ApplicationId secondApp=ApplicationId.newInstance(0,2);
  writeApplicationStartData(secondApp);
  writeApplicationFinishData(secondApp);
  GetApplicationsRequest request=GetApplicationsRequest.newInstance();
  GetApplicationsResponse response=
      historyServer.getClientService().getClientHandler().getApplications(request);
  List reports=response.getApplicationList();
  Assert.assertNotNull(reports);
  Assert.assertEquals(firstApp,((ApplicationReport)reports.get(0)).getApplicationId());
  Assert.assertEquals(secondApp,((ApplicationReport)reports.get(1)).getApplicationId());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Writes history for two containers of one attempt and verifies the client
 * service returns both; note the service lists them newest-first.
 */
@Test public void testContainers() throws IOException, YarnException {
  ApplicationId appId=ApplicationId.newInstance(0,1);
  writeApplicationStartData(appId);
  ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
  ContainerId firstContainer=ContainerId.newInstance(attemptId,1);
  ContainerId secondContainer=ContainerId.newInstance(attemptId,2);
  writeContainerStartData(firstContainer);
  writeContainerFinishData(firstContainer);
  writeContainerStartData(secondContainer);
  writeContainerFinishData(secondContainer);
  writeApplicationFinishData(appId);
  GetContainersRequest request=GetContainersRequest.newInstance(attemptId);
  GetContainersResponse response=
      historyServer.getClientService().getClientHandler().getContainers(request);
  List containers=response.getContainerList();
  Assert.assertNotNull(containers);
  // The list comes back in reverse creation order.
  Assert.assertEquals(firstContainer,((ContainerReport)containers.get(1)).getContainerId());
  Assert.assertEquals(secondContainer,((ContainerReport)containers.get(0)).getContainerId());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Writes start/finish history for a single application attempt and checks
 * the client service returns its report with the expected attempt id.
 */
@Test public void testApplicationAttemptReport() throws IOException, YarnException {
  ApplicationId appId=ApplicationId.newInstance(0,1);
  ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
  writeApplicationAttemptStartData(attemptId);
  writeApplicationAttemptFinishData(attemptId);
  GetApplicationAttemptReportRequest request=
      GetApplicationAttemptReportRequest.newInstance(attemptId);
  GetApplicationAttemptReportResponse response=historyServer.getClientService()
      .getClientHandler().getApplicationAttemptReport(request);
  ApplicationAttemptReport report=response.getApplicationAttemptReport();
  Assert.assertNotNull(report);
  Assert.assertEquals("appattempt_0_0001_000001",
      report.getApplicationAttemptId().toString());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Writes history for two attempts of one application and verifies the
 * client service lists both in attempt-id order.
 */
@Test public void testApplicationAttempts() throws IOException, YarnException {
  ApplicationId appId=ApplicationId.newInstance(0,1);
  ApplicationAttemptId firstAttempt=ApplicationAttemptId.newInstance(appId,1);
  ApplicationAttemptId secondAttempt=ApplicationAttemptId.newInstance(appId,2);
  writeApplicationAttemptStartData(firstAttempt);
  writeApplicationAttemptFinishData(firstAttempt);
  writeApplicationAttemptStartData(secondAttempt);
  writeApplicationAttemptFinishData(secondAttempt);
  GetApplicationAttemptsRequest request=
      GetApplicationAttemptsRequest.newInstance(appId);
  GetApplicationAttemptsResponse response=historyServer.getClientService()
      .getClientHandler().getApplicationAttempts(request);
  List attempts=response.getApplicationAttemptList();
  Assert.assertNotNull(attempts);
  Assert.assertEquals(firstAttempt,
      ((ApplicationAttemptReport)attempts.get(0)).getApplicationAttemptId());
  Assert.assertEquals(secondAttempt,
      ((ApplicationAttemptReport)attempts.get(1)).getApplicationAttemptId());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Writes start/finish history for one application and checks the client
 * service's report: id, type, and queue.
 */
@Test public void testApplicationReport() throws IOException, YarnException {
  ApplicationId appId=ApplicationId.newInstance(0,1);
  writeApplicationStartData(appId);
  writeApplicationFinishData(appId);
  GetApplicationReportRequest request=GetApplicationReportRequest.newInstance(appId);
  GetApplicationReportResponse response=historyServer.getClientService()
      .getClientHandler().getApplicationReport(request);
  ApplicationReport report=response.getApplicationReport();
  Assert.assertNotNull(report);
  Assert.assertEquals("application_0_0001",report.getApplicationId().toString());
  Assert.assertEquals("test type",report.getApplicationType().toString());
  Assert.assertEquals("test queue",report.getQueue().toString());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Writes history for one container and checks the client service's report
 * carries the right container id and log URL.
 */
@Test public void testContainerReport() throws IOException, YarnException {
  ApplicationId appId=ApplicationId.newInstance(0,1);
  writeApplicationStartData(appId);
  ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
  ContainerId containerId=ContainerId.newInstance(attemptId,1);
  writeContainerStartData(containerId);
  writeContainerFinishData(containerId);
  writeApplicationFinishData(appId);
  GetContainerReportRequest request=GetContainerReportRequest.newInstance(containerId);
  GetContainerReportResponse response=historyServer.getClientService()
      .getClientHandler().getContainerReport(request);
  ContainerReport report=response.getContainerReport();
  Assert.assertNotNull(report);
  Assert.assertEquals(containerId,report.getContainerId());
  Assert.assertEquals(expectedLogUrl,report.getLogUrl());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Writes application and attempt history, then checks the history manager's
 * report exposes the id, current attempt, host, type, and queue.
 */
@Test public void testApplicationReport() throws IOException, YarnException {
  ApplicationId appId=ApplicationId.newInstance(0,1);
  writeApplicationStartData(appId);
  writeApplicationFinishData(appId);
  ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
  writeApplicationAttemptStartData(attemptId);
  writeApplicationAttemptFinishData(attemptId);
  ApplicationReport report=applicationHistoryManagerImpl.getApplication(appId);
  Assert.assertNotNull(report);
  Assert.assertEquals(appId,report.getApplicationId());
  Assert.assertEquals(attemptId,report.getCurrentApplicationAttemptId());
  // The test writer records the attempt id string as the host.
  Assert.assertEquals(attemptId.toString(),report.getHost());
  Assert.assertEquals("test type",report.getApplicationType().toString());
  Assert.assertEquals("test queue",report.getQueue().toString());
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises attempt-history persistence: finish-before-start is rejected,
 * multiple attempts round-trip correctly, and re-writing records after the
 * owning application finished fails as "already stored".
 */
@Test public void testReadWriteApplicationAttemptHistory() throws Exception {
  ApplicationId appId=ApplicationId.newInstance(0,1);
  ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
  // Writing finish data with no start data must fail.
  try {
    writeApplicationAttemptFinishData(attemptId);
    Assert.fail();
  }
  catch ( IOException e) {
    Assert.assertTrue(e.getMessage().contains("is stored before the start information"));
  }
  final int numAppAttempts=5;
  writeApplicationStartData(appId);
  for (int attempt=1; attempt <= numAppAttempts; ++attempt) {
    attemptId=ApplicationAttemptId.newInstance(appId,attempt);
    writeApplicationAttemptStartData(attemptId);
    writeApplicationAttemptFinishData(attemptId);
  }
  Assert.assertEquals(numAppAttempts,store.getApplicationAttempts(appId).size());
  // Each stored attempt echoes its own id as host and diagnostics.
  for (int attempt=1; attempt <= numAppAttempts; ++attempt) {
    attemptId=ApplicationAttemptId.newInstance(appId,attempt);
    ApplicationAttemptHistoryData data=store.getApplicationAttempt(attemptId);
    Assert.assertNotNull(data);
    Assert.assertEquals(attemptId.toString(),data.getHost());
    Assert.assertEquals(attemptId.toString(),data.getDiagnosticsInfo());
  }
  writeApplicationFinishData(appId);
  attemptId=ApplicationAttemptId.newInstance(appId,1);
  // After the application finished, both record types are duplicates.
  try {
    writeApplicationAttemptStartData(attemptId);
    Assert.fail();
  }
  catch ( IOException e) {
    Assert.assertTrue(e.getMessage().contains("is already stored"));
  }
  try {
    writeApplicationAttemptFinishData(attemptId);
    Assert.fail();
  }
  catch ( IOException e) {
    Assert.assertTrue(e.getMessage().contains("is already stored"));
  }
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises application-history persistence: finish-before-start is
 * rejected, several applications round-trip correctly, and re-writing a
 * completed application's records fails as "already stored".
 */
@Test public void testReadWriteApplicationHistory() throws Exception {
  ApplicationId appId=ApplicationId.newInstance(0,1);
  // Writing finish data with no start data must fail.
  try {
    writeApplicationFinishData(appId);
    Assert.fail();
  }
  catch ( IOException e) {
    Assert.assertTrue(e.getMessage().contains("is stored before the start information"));
  }
  final int numApps=5;
  for (int app=1; app <= numApps; ++app) {
    appId=ApplicationId.newInstance(0,app);
    writeApplicationStartData(appId);
    writeApplicationFinishData(appId);
  }
  Assert.assertEquals(numApps,store.getAllApplications().size());
  // Each stored application echoes its own id as name and diagnostics.
  for (int app=1; app <= numApps; ++app) {
    appId=ApplicationId.newInstance(0,app);
    ApplicationHistoryData data=store.getApplication(appId);
    Assert.assertNotNull(data);
    Assert.assertEquals(appId.toString(),data.getApplicationName());
    Assert.assertEquals(appId.toString(),data.getDiagnosticsInfo());
  }
  // Re-writing either record for an already-stored application must fail.
  appId=ApplicationId.newInstance(0,1);
  try {
    writeApplicationStartData(appId);
    Assert.fail();
  }
  catch ( IOException e) {
    Assert.assertTrue(e.getMessage().contains("is already stored"));
  }
  try {
    writeApplicationFinishData(appId);
    Assert.fail();
  }
  catch ( IOException e) {
    Assert.assertTrue(e.getMessage().contains("is already stored"));
  }
}
APIUtilityVerifier BooleanVerifier
/**
 * Bulk-writes history for 100k containers and asserts the process heap
 * grew by less than 400 MB, i.e. the store does not retain per-container
 * state unboundedly.
 */
@Test public void testMassiveWriteContainerHistory() throws IOException {
// Convert raw byte counts to megabytes for the final comparison.
long mb=1024 * 1024;
Runtime runtime=Runtime.getRuntime();
// NOTE(review): heap-usage deltas depend on GC timing, so this bound is
// inherently best-effort and could be flaky under memory pressure.
long usedMemoryBefore=(runtime.totalMemory() - runtime.freeMemory()) / mb;
int numContainers=100000;
ApplicationId appId=ApplicationId.newInstance(0,1);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1);
// Write a start and finish record for every container of the attempt.
for (int i=1; i <= numContainers; ++i) {
ContainerId containerId=ContainerId.newInstance(appAttemptId,i);
writeContainerStartData(containerId);
writeContainerFinishData(containerId);
}
long usedMemoryAfter=(runtime.totalMemory() - runtime.freeMemory()) / mb;
// The 100k writes must not have grown the heap by 400 MB or more.
Assert.assertTrue((usedMemoryAfter - usedMemoryBefore) < 400);
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises container-history persistence: finish-before-start is
 * rejected, several containers round-trip correctly, the first container
 * is reported as the AM container, and re-writing records after the
 * attempt finished fails as "already stored".
 */
@Test public void testReadWriteContainerHistory() throws Exception {
  ApplicationId appId=ApplicationId.newInstance(0,1);
  ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
  ContainerId containerId=ContainerId.newInstance(attemptId,1);
  // Writing finish data with no start data must fail.
  try {
    writeContainerFinishData(containerId);
    Assert.fail();
  }
  catch ( IOException e) {
    Assert.assertTrue(e.getMessage().contains("is stored before the start information"));
  }
  writeApplicationAttemptStartData(attemptId);
  final int numContainers=5;
  for (int container=1; container <= numContainers; ++container) {
    containerId=ContainerId.newInstance(attemptId,container);
    writeContainerStartData(containerId);
    writeContainerFinishData(containerId);
  }
  Assert.assertEquals(numContainers,store.getContainers(attemptId).size());
  // Each stored container echoes its id in priority and diagnostics.
  for (int container=1; container <= numContainers; ++container) {
    containerId=ContainerId.newInstance(attemptId,container);
    ContainerHistoryData data=store.getContainer(containerId);
    Assert.assertNotNull(data);
    Assert.assertEquals(Priority.newInstance(containerId.getId()),data.getPriority());
    Assert.assertEquals(containerId.toString(),data.getDiagnosticsInfo());
  }
  // Container #1 must be reported as the attempt's AM container.
  ContainerHistoryData masterContainer=store.getAMContainer(attemptId);
  Assert.assertNotNull(masterContainer);
  Assert.assertEquals(ContainerId.newInstance(attemptId,1),masterContainer.getContainerId());
  writeApplicationAttemptFinishData(attemptId);
  // After the attempt finished, both record types are duplicates.
  containerId=ContainerId.newInstance(attemptId,1);
  try {
    writeContainerStartData(containerId);
    Assert.fail();
  }
  catch ( IOException e) {
    Assert.assertTrue(e.getMessage().contains("is already stored"));
  }
  try {
    writeContainerFinishData(containerId);
    Assert.fail();
  }
  catch ( IOException e) {
    Assert.assertTrue(e.getMessage().contains("is already stored"));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Renders the AHS controller's index action against a mock history manager
 * and verifies it sets the expected page title.
 */
@Test public void testAppControllerIndex() throws Exception {
  ApplicationHistoryManager historyManager=mock(ApplicationHistoryManager.class);
  Injector injector=
      WebAppTests.createMockInjector(ApplicationHistoryManager.class,historyManager);
  AHSController controller=injector.getInstance(AHSController.class);
  controller.index();
  Assert.assertEquals("Application History",controller.get(TITLE,"unknown"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Fetches the containers listing for an attempt over the AHS REST API and
 * verifies the JSON payload contains all five expected containers.
 */
@Test public void testMultipleContainers() throws Exception {
  ApplicationId appId=ApplicationId.newInstance(0,1);
  ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
  ClientResponse response=resource().path("ws").path("v1")
      .path("applicationhistory").path("apps").path(appId.toString())
      .path("appattempts").path(attemptId.toString()).path("containers")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  JSONObject body=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,body.length());
  JSONObject containers=body.getJSONObject("containers");
  assertEquals("incorrect number of elements",1,containers.length());
  JSONArray containerArray=containers.getJSONArray("container");
  assertEquals("incorrect number of elements",5,containerArray.length());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Fetches a single container over the AHS REST API and verifies each field
 * of the JSON representation, including the computed log URL.
 */
@Test public void testSingleContainer() throws Exception {
  ApplicationId appId=ApplicationId.newInstance(0,1);
  ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
  ContainerId containerId=ContainerId.newInstance(attemptId,1);
  ClientResponse response=resource().path("ws").path("v1")
      .path("applicationhistory").path("apps").path(appId.toString())
      .path("appattempts").path(attemptId.toString()).path("containers")
      .path(containerId.toString())
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  JSONObject body=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,body.length());
  JSONObject container=body.getJSONObject("container");
  assertEquals(containerId.toString(),container.getString("containerId"));
  assertEquals(containerId.toString(),container.getString("diagnosticsInfo"));
  assertEquals("0",container.getString("allocatedMB"));
  assertEquals("0",container.getString("allocatedVCores"));
  assertEquals(NodeId.newInstance("localhost",0).toString(),
      container.getString("assignedNodeId"));
  assertEquals(Priority.newInstance(containerId.getId()).toString(),
      container.getString("priority"));
  // The log URL is derived from the AHS web-app address in a fresh config.
  Configuration conf=new YarnConfiguration();
  String expectedLogUrl=WebAppUtils.getHttpSchemePrefix(conf)
      + WebAppUtils.getAHSWebAppURLWithoutScheme(conf)
      + "/applicationhistory/logs/localhost:0/container_0_0001_01_000001/"
      + "container_0_0001_01_000001/test user";
  assertEquals(expectedLogUrl,container.getString("logUrl"));
  assertEquals(ContainerState.COMPLETE.toString(),
      container.getString("containerState"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Fetches a single application attempt over the AHS REST API and verifies
 * each field of the JSON representation.
 */
@Test public void testSingleAttempt() throws Exception {
  ApplicationId appId=ApplicationId.newInstance(0,1);
  ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
  ClientResponse response=resource().path("ws").path("v1")
      .path("applicationhistory").path("apps").path(appId.toString())
      .path("appattempts").path(attemptId.toString())
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  JSONObject body=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,body.length());
  JSONObject attempt=body.getJSONObject("appAttempt");
  assertEquals(attemptId.toString(),attempt.getString("appAttemptId"));
  assertEquals(attemptId.toString(),attempt.getString("host"));
  assertEquals(attemptId.toString(),attempt.getString("diagnosticsInfo"));
  assertEquals("test tracking url",attempt.getString("trackingUrl"));
  assertEquals(YarnApplicationAttemptState.FINISHED.toString(),
      attempt.get("appAttemptState"));
}
APIUtilityVerifier BooleanVerifier
/**
 * Without a scheduling priority configured, the run command must begin
 * directly with the platform launcher (winutils on Windows, bash elsewhere).
 */
@Test(timeout=5000) public void testRunCommandNoPriority() throws Exception {
  String[] command=ContainerExecutor.getRunCommand("echo","group1",new Configuration());
  boolean isPlatformLauncher=
      command[0].equals(Shell.WINUTILS) || command[0].equals("bash");
  assertTrue("first command should be the run command for the platform",
      isPlatformLauncher);
}
APIUtilityVerifier BranchVerifier InternalCallVerifier EqualityVerifier
/**
 * With NM_CONTAINER_EXECUTOR_SCHED_PRIORITY set, the run command must be
 * wrapped in "nice -n &lt;priority&gt;" on Unix (winutils handles priority
 * internally on Windows). Checked for both a positive and a negative value.
 */
@Test(timeout=5000) public void testRunCommandwithPriority() throws Exception {
  Configuration conf=new Configuration();
  conf.setInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY,2);
  verifyRunCommandPriority(conf,2);
  conf.setInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY,-5);
  verifyRunCommandPriority(conf,-5);
}

/**
 * Asserts that the run command built from {@code conf} reflects the given
 * scheduling priority (factored out of the previously duplicated if/else).
 */
private void verifyRunCommandPriority(Configuration conf,int priority) throws Exception {
  String[] command=ContainerExecutor.getRunCommand("echo","group1",conf);
  if (Shell.WINDOWS) {
    assertEquals("first command should be the run command for the platform",
        Shell.WINUTILS,command[0]);
  }
  else {
    assertEquals("first command should be nice","nice",command[0]);
    assertEquals("second command should be -n","-n",command[1]);
    assertEquals("third command should be the priority",
        Integer.toString(priority),command[2]);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that DefaultContainerExecutor creates the user cache, app
 * cache, file cache, app, and log directories with their prescribed
 * permissions, even under a restrictive 077 umask.
 */
@Test public void testDirPermissions() throws Exception {
// Start from a clean slate so pre-existing dirs can't mask wrong modes.
deleteTmpFiles();
final String user="somebody";
final String appId="app_12345_123";
// Expected permissions for each directory level, taken from the executor.
final FsPermission userCachePerm=new FsPermission(DefaultContainerExecutor.USER_PERM);
final FsPermission appCachePerm=new FsPermission(DefaultContainerExecutor.APPCACHE_PERM);
final FsPermission fileCachePerm=new FsPermission(DefaultContainerExecutor.FILECACHE_PERM);
final FsPermission appDirPerm=new FsPermission(DefaultContainerExecutor.APPDIR_PERM);
final FsPermission logDirPerm=new FsPermission(DefaultContainerExecutor.LOGDIR_PERM);
// Two local and two log dirs to check per-directory behavior.
List localDirs=new ArrayList();
localDirs.add(new Path(BASE_TMP_PATH,"localDirA").toString());
localDirs.add(new Path(BASE_TMP_PATH,"localDirB").toString());
List logDirs=new ArrayList();
logDirs.add(new Path(BASE_TMP_PATH,"logDirA").toString());
logDirs.add(new Path(BASE_TMP_PATH,"logDirB").toString());
// Restrictive umask: the executor must still set explicit permissions.
Configuration conf=new Configuration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,"077");
FileContext lfs=FileContext.getLocalFSFileContext(conf);
DefaultContainerExecutor executor=new DefaultContainerExecutor(lfs);
executor.init();
try {
// Creation order mirrors production: user dirs, then caches, then app dirs.
executor.createUserLocalDirs(localDirs,user);
executor.createUserCacheDirs(localDirs,user);
executor.createAppDirs(localDirs,user,appId);
for ( String dir : localDirs) {
FileStatus stats=lfs.getFileStatus(new Path(new Path(dir,ContainerLocalizer.USERCACHE),user));
Assert.assertEquals(userCachePerm,stats.getPermission());
}
// Check appcache, filecache, and per-app dirs under every local dir.
for ( String dir : localDirs) {
Path userCachePath=new Path(new Path(dir,ContainerLocalizer.USERCACHE),user);
Path appCachePath=new Path(userCachePath,ContainerLocalizer.APPCACHE);
FileStatus stats=lfs.getFileStatus(appCachePath);
Assert.assertEquals(appCachePerm,stats.getPermission());
stats=lfs.getFileStatus(new Path(userCachePath,ContainerLocalizer.FILECACHE));
Assert.assertEquals(fileCachePerm,stats.getPermission());
stats=lfs.getFileStatus(new Path(appCachePath,appId));
Assert.assertEquals(appDirPerm,stats.getPermission());
}
executor.createAppLogDirs(appId,logDirs);
for ( String dir : logDirs) {
FileStatus stats=lfs.getFileStatus(new Path(dir,appId));
Assert.assertEquals(logDirPerm,stats.getPermission());
}
}
 finally {
// Always clean up created directories, even if an assertion failed.
deleteTmpFiles();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier
/**
 * Launching a container whose launch files are missing must fail cleanly:
 * every diagnostic the executor logs or forwards to the container must name
 * the root cause ("No such file or directory"), and launchContainer must
 * return a non-zero exit code.
 */
@Test
public void testContainerLaunchError() throws IOException, InterruptedException {
  // Local and log dirs live under the test tmp area, with a restrictive umask.
  Path localDir = new Path(BASE_TMP_PATH, "localDir");
  List localDirs = new ArrayList();
  localDirs.add(localDir.toString());
  List logDirs = new ArrayList();
  Path logDir = new Path(BASE_TMP_PATH, "logDir");
  logDirs.add(logDir.toString());
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
  conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.toString());
  conf.set(YarnConfiguration.NM_LOG_DIRS, logDir.toString());
  FileContext lfs = FileContext.getLocalFSFileContext(conf);
  DefaultContainerExecutor mockExec = spy(new DefaultContainerExecutor(lfs));
  mockExec.setConf(conf);
  // Intercept executor log output and assert it carries the failure reason.
  doAnswer(new Answer() {
    @Override
    public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
      String diagnostics = (String) invocationOnMock.getArguments()[0];
      assertTrue("Invalid Diagnostics message: " + diagnostics,
          diagnostics.contains("No such file or directory"));
      return null;
    }
  }).when(mockExec).logOutput(any(String.class));
  String appSubmitter = "nobody";
  String appId = "APP_ID";
  String containerId = "CONTAINER_ID";
  Container container = mock(Container.class);
  ContainerId cId = mock(ContainerId.class);
  ContainerLaunchContext context = mock(ContainerLaunchContext.class);
  HashMap env = new HashMap();
  when(container.getContainerId()).thenReturn(cId);
  when(container.getLaunchContext()).thenReturn(context);
  try {
    // Diagnostics events delivered to the container must also carry the cause.
    doAnswer(new Answer() {
      @Override
      public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
        ContainerDiagnosticsUpdateEvent event =
            (ContainerDiagnosticsUpdateEvent) invocationOnMock.getArguments()[0];
        assertTrue("Invalid Diagnostics message: " + event.getDiagnosticsUpdate(),
            event.getDiagnosticsUpdate().contains("No such file or directory"));
        return null;
      }
    }).when(container).handle(any(ContainerDiagnosticsUpdateEvent.class));
    when(cId.toString()).thenReturn(containerId);
    when(cId.getApplicationAttemptId()).thenReturn(
        ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 0));
    when(context.getEnvironment()).thenReturn(env);
    mockExec.createUserLocalDirs(localDirs, appSubmitter);
    mockExec.createUserCacheDirs(localDirs, appSubmitter);
    mockExec.createAppDirs(localDirs, appSubmitter, appId);
    mockExec.createAppLogDirs(appId, logDirs);
    Path scriptPath = new Path("file:///bin/echo");
    Path tokensPath = new Path("file:///dev/null");
    Path workDir = localDir;
    Path pidFile = new Path(workDir, "pid.txt");
    mockExec.init();
    mockExec.activateContainer(cId, pidFile);
    int ret = mockExec.launchContainer(container, scriptPath, tokensPath,
        appSubmitter, appId, workDir, localDirs, localDirs);
    // FIX: assertNotSame compares object identity of autoboxed Integers, which
    // is the wrong tool for a primitive exit code; assert on the value itself.
    Assert.assertTrue(
        "launchContainer was expected to fail with a non-zero exit code, got " + ret,
        ret != 0);
  } finally {
    mockExec.deleteAsUser(appSubmitter, localDir);
    mockExec.deleteAsUser(appSubmitter, logDir);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * DeletionService must remove a relative path from every registered base
 * directory: the same content tree is replicated under four base dirs, each
 * relative path is submitted once for deletion, and the test polls until
 * every copy has disappeared.
 */
@Test public void testRelativeDelete() throws Exception {
// Print the random seed so a failing randomized run can be reproduced.
Random r=new Random();
long seed=r.nextLong();
r.setSeed(seed);
System.out.println("SEED: " + seed);
List baseDirs=buildDirs(r,base,4);
createDirs(new Path("."),baseDirs);
// Same relative content layout under each base dir.
List content=buildDirs(r,new Path("."),10);
for ( Path b : baseDirs) {
createDirs(b,content);
}
DeletionService del=new DeletionService(new FakeDefaultContainerExecutor());
try {
del.init(new Configuration());
del.start();
for ( Path p : content) {
assertTrue(lfs.util().exists(new Path(baseDirs.get(0),p)));
// Alternate between a null user and "dingo" to cover both delete paths.
del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",p,baseDirs.toArray(new Path[4]));
}
// Deletion is asynchronous; poll until each copy is gone, sharing a single
// 20-second budget across all base-dir/content combinations.
int msecToWait=20 * 1000;
for ( Path p : baseDirs) {
for ( Path q : content) {
Path fp=new Path(p,q);
while (msecToWait > 0 && lfs.util().exists(fp)) {
Thread.sleep(100);
msecToWait-=100;
}
assertFalse(lfs.util().exists(fp));
}
}
}
finally {
del.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Exercises FileDeletionTask dependency chaining.
 * Phase 1: several subdir-deletion tasks all depend on a parent-dir task;
 * once every dependent succeeds, the parent dir is deleted too.
 * Phase 2: one dependent task is pre-marked as failed, so the parent-dir
 * deletion must NOT run and the parent dir must survive.
 */
@Test(timeout=60000) public void testFileDeletionTaskDependency() throws Exception {
FakeDefaultContainerExecutor exec=new FakeDefaultContainerExecutor();
Configuration conf=new Configuration();
exec.setConf(conf);
DeletionService del=new DeletionService(exec);
del.init(conf);
del.start();
try {
// Seed is printed so a failing randomized run can be reproduced.
Random r=new Random();
long seed=r.nextLong();
r.setSeed(seed);
System.out.println("SEED: " + seed);
List dirs=buildDirs(r,base,2);
createDirs(new Path("."),dirs);
// Phase 1: all dependents succeed -> parent dir gets deleted.
List subDirs=buildDirs(r,dirs.get(0),2);
FileDeletionTask dependentDeletionTask=del.createFileDeletionTask(null,dirs.get(0),new Path[]{});
List deletionTasks=new ArrayList();
for ( Path subDir : subDirs) {
FileDeletionTask deletionTask=del.createFileDeletionTask(null,null,new Path[]{subDir});
deletionTask.addFileDeletionTaskDependency(dependentDeletionTask);
deletionTasks.add(deletionTask);
}
for ( FileDeletionTask task : deletionTasks) {
del.scheduleFileDeletionTask(task);
}
// Poll (20s budget) for the parent dir to disappear.
int msecToWait=20 * 1000;
while (msecToWait > 0 && (lfs.util().exists(dirs.get(0)))) {
Thread.sleep(100);
msecToWait-=100;
}
assertFalse(lfs.util().exists(dirs.get(0)));
// Phase 2: include a non-existent file and force one task to "fail".
subDirs=buildDirs(r,dirs.get(1),2);
subDirs.add(new Path(dirs.get(1),"absentFile"));
dependentDeletionTask=del.createFileDeletionTask(null,dirs.get(1),new Path[]{});
deletionTasks=new ArrayList();
for ( Path subDir : subDirs) {
FileDeletionTask deletionTask=del.createFileDeletionTask(null,null,new Path[]{subDir});
deletionTask.addFileDeletionTaskDependency(dependentDeletionTask);
deletionTasks.add(deletionTask);
}
// Mark the "absentFile" task as failed up-front.
deletionTasks.get(2).setSuccess(false);
for ( FileDeletionTask task : deletionTasks) {
del.scheduleFileDeletionTask(task);
}
// Wait for the two real subdirs to be deleted...
msecToWait=20 * 1000;
while (msecToWait > 0 && (lfs.util().exists(subDirs.get(0)) || lfs.util().exists(subDirs.get(1)))) {
Thread.sleep(100);
msecToWait-=100;
}
// ...but the parent dir must remain because one dependent failed.
assertTrue(lfs.util().exists(dirs.get(1)));
}
finally {
del.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies DeletionService recovery: deletion requests recorded in an
 * NMMemoryStateStoreService before a stop are replayed by a freshly created
 * DeletionService sharing the same state store, so all scheduled deletions
 * still complete after the "restart".
 */
@Test public void testRecovery() throws Exception {
// Seed is printed so a failing randomized run can be reproduced.
Random r=new Random();
long seed=r.nextLong();
r.setSeed(seed);
System.out.println("SEED: " + seed);
List baseDirs=buildDirs(r,base,4);
createDirs(new Path("."),baseDirs);
// Replicate the same relative content under each base dir.
List content=buildDirs(r,new Path("."),10);
for ( Path b : baseDirs) {
createDirs(b,content);
}
Configuration conf=new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true);
// 1s delete delay so tasks are still pending when the service stops.
conf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC,1);
NMMemoryStateStoreService stateStore=new NMMemoryStateStoreService();
stateStore.init(conf);
stateStore.start();
DeletionService del=new DeletionService(new FakeDefaultContainerExecutor(),stateStore);
try {
del.init(conf);
del.start();
for ( Path p : content) {
assertTrue(lfs.util().exists(new Path(baseDirs.get(0),p)));
del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",p,baseDirs.toArray(new Path[4]));
}
// Simulate NM restart: stop, then build a new service on the same store.
del.stop();
del=new DeletionService(new FakeDefaultContainerExecutor(),stateStore);
del.init(conf);
del.start();
// Recovered tasks must finish; poll with a shared 10-second budget.
int msecToWait=10 * 1000;
for ( Path p : baseDirs) {
for ( Path q : content) {
Path fp=new Path(p,q);
while (msecToWait > 0 && lfs.util().exists(fp)) {
Thread.sleep(100);
msecToWait-=100;
}
assertFalse(lfs.util().exists(fp));
}
}
}
finally {
del.close();
stateStore.close();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Exercises DirectoryCollection's disk cutoffs: a 0% utilization ceiling
 * rejects the directory, a 100% ceiling accepts it, demanding the volume's
 * entire capacity as free space rejects it, and a 100% ceiling with an
 * explicit 0-MB free-space floor accepts it again.
 */
@Test
public void testDiskSpaceUtilizationLimit() throws IOException {
  String dirPath = new File(testDir, "dirA").getPath();
  String[] localDirs = {dirPath};

  // A 0% utilization ceiling can never be satisfied.
  DirectoryCollection collection = new DirectoryCollection(localDirs, 0.0F);
  collection.checkDirs();
  Assert.assertEquals(0, collection.getGoodDirs().size());
  Assert.assertEquals(1, collection.getFailedDirs().size());

  // A 100% ceiling always passes.
  collection = new DirectoryCollection(localDirs, 100.0F);
  collection.checkDirs();
  Assert.assertEquals(1, collection.getGoodDirs().size());
  Assert.assertEquals(0, collection.getFailedDirs().size());

  // Requiring the volume's full capacity (in MB) to be free must fail.
  collection = new DirectoryCollection(localDirs, testDir.getTotalSpace() / (1024 * 1024));
  collection.checkDirs();
  Assert.assertEquals(0, collection.getGoodDirs().size());
  Assert.assertEquals(1, collection.getFailedDirs().size());

  // 100% utilization ceiling plus a 0-MB free-space floor passes again.
  collection = new DirectoryCollection(localDirs, 100.0F, 0);
  collection.checkDirs();
  Assert.assertEquals(1, collection.getGoodDirs().size());
  Assert.assertEquals(0, collection.getFailedDirs().size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * createNonExistentDirs must create missing local dirs (including parents)
 * with the supplied permission, while leaving the permissions of directories
 * that already exist untouched.
 */
@Test
public void testCreateDirectories() throws IOException {
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
  FileContext fc = FileContext.getLocalFSFileContext(conf);
  String dirA = new File(testDir, "dirA").getPath();
  String dirB = new File(dirA, "dirB").getPath();
  String dirC = new File(testDir, "dirC").getPath();
  // dirC is pre-created with distinctive 0710 permissions; they must survive.
  Path pathC = new Path(dirC);
  FsPermission permDirC = new FsPermission((short) 0710);
  fc.mkdir(pathC, null, true);
  fc.setPermission(pathC, permDirC);
  String[] localDirs = {dirA, dirB, dirC};
  DirectoryCollection collection = new DirectoryCollection(localDirs,
      conf.getFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
          YarnConfiguration.DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE));
  FsPermission defaultPerm = FsPermission.getDefault()
      .applyUMask(new FsPermission((short) FsPermission.DEFAULT_UMASK));
  boolean created = collection.createNonExistentDirs(fc, defaultPerm);
  Assert.assertTrue(created);
  // Both the newly created parent and leaf carry the requested permission.
  FileStatus status = fc.getFileStatus(new Path(dirA));
  Assert.assertEquals("local dir parent not created with proper permissions",
      defaultPerm, status.getPermission());
  status = fc.getFileStatus(new Path(dirB));
  Assert.assertEquals("local dir not created with proper permissions",
      defaultPerm, status.getPermission());
  // The pre-existing directory keeps its original permission bits.
  status = fc.getFileStatus(pathC);
  Assert.assertEquals("existing local directory permissions modified",
      permDirC, status.getPermission());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Regression test against ConcurrentModificationException: an iterator taken
 * over getGoodDirs() before checkDirs() prunes the bad entry must still be
 * safe to advance afterwards.
 */
@Test
public void testConcurrentAccess() throws IOException {
  Configuration conf = new Configuration();
  // testFile is a plain file, not a directory, so checkDirs() must reject it.
  String[] localDirs = {testFile.getPath()};
  DirectoryCollection collection = new DirectoryCollection(localDirs,
      conf.getFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
          YarnConfiguration.DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE));
  List goodDirs = collection.getGoodDirs();
  ListIterator iterator = goodDirs.listIterator();
  Assert.assertTrue("checkDirs did not remove test file from directory list",
      collection.checkDirs());
  // Advancing after the mutation must not throw.
  iterator.next();
}
APIUtilityVerifier EqualityVerifier
/**
 * Runs a trivial "touch" container end-to-end and verifies both the zero
 * exit code and that the created file is owned by the submitting user.
 */
@Test
public void testContainerLaunch() throws IOException {
  // Skip on platforms/configs where the native executor tests cannot run.
  if (!shouldRun()) {
    return;
  }
  File touchFile = new File(workSpace, "touch-file");
  int exitCode = runAndBlock("touch", touchFile.getAbsolutePath());
  assertEquals(0, exitCode);
  Path touched = new Path(touchFile.getAbsolutePath());
  FileStatus status = FileContext.getLocalFSFileContext().getFileStatus(touched);
  assertEquals(appSubmitter, status.getOwner());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Launches a long-running "sleep 100" container on a background thread,
 * polls the executor for its pid, sends SIGTERM, and verifies the launcher
 * thread exits once the process has been killed.
 */
@Test public void testContainerKill() throws Exception {
// Skip on platforms/configs where the executor tests cannot run.
if (!shouldRun()) {
return;
}
final ContainerId sleepId=getNextContainerId();
// Run the container from a daemon thread; runAndBlock would otherwise block
// this test until the 100-second sleep finished on its own.
Thread t=new Thread(){
public void run(){
try {
runAndBlock(sleepId,"sleep","100");
}
catch ( IOException e) {
LOG.warn("Caught exception while running sleep",e);
}
}
}
;
t.setDaemon(true);
t.start();
assertTrue(t.isAlive());
// Poll up to ~2s (10 x 200ms) for the executor to record a pid.
String pid=null;
int count=10;
while ((pid=exec.getProcessId(sleepId)) == null && count > 0) {
LOG.info("Sleeping for 200 ms before checking for pid ");
Thread.sleep(200);
count--;
}
assertNotNull(pid);
LOG.info("Going to killing the process.");
exec.signalContainer(appSubmitter,pid,Signal.TERM);
LOG.info("sleeping for 100ms to let the sleep be killed");
Thread.sleep(100);
// Once the process is dead, runAndBlock returns and the thread ends.
assertFalse(t.isAlive());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that a NodeManager restart wipes leftover local state: after a
 * container runs to DONE (populating usercache/ and nm_private/), restarting
 * the NM must delete everything under usercache, filecache, and nm_private,
 * and schedule the expected deletion tasks on the deletion service.
 */
@Test(timeout=2000000) public void testClearLocalDirWhenNodeReboot() throws IOException, YarnException, InterruptedException {
nm=new MyNodeManager();
nm.start();
final ContainerManagementProtocol containerManager=nm.getContainerManager();
// Pre-populate the filecache with 100 files so there is state to clear.
createFiles(nmLocalDir.getAbsolutePath(),ContainerLocalizer.FILECACHE,100);
localResourceDir.mkdirs();
// Build a minimal container launch request with one local resource.
ContainerLaunchContext containerLaunchContext=Records.newRecord(ContainerLaunchContext.class);
ContainerId cId=createContainerId();
URL localResourceUri=ConverterUtils.getYarnUrlFromPath(localFS.makeQualified(new Path(localResourceDir.getAbsolutePath())));
LocalResource localResource=LocalResource.newInstance(localResourceUri,LocalResourceType.FILE,LocalResourceVisibility.APPLICATION,-1,localResourceDir.lastModified());
String destinationFile="dest_file";
Map localResources=new HashMap();
localResources.put(destinationFile,localResource);
containerLaunchContext.setLocalResources(localResources);
List commands=new ArrayList();
containerLaunchContext.setCommands(commands);
NodeId nodeId=nm.getNMContext().getNodeId();
StartContainerRequest scRequest=StartContainerRequest.newInstance(containerLaunchContext,TestContainerManager.createContainerToken(cId,0,nodeId,destinationFile,nm.getNMContext().getContainerTokenSecretManager()));
List list=new ArrayList();
list.add(scRequest);
final StartContainersRequest allRequests=StartContainersRequest.newInstance(list);
// Start the container as the app-attempt user, carrying an NM token.
final UserGroupInformation currentUser=UserGroupInformation.createRemoteUser(cId.getApplicationAttemptId().toString());
NMTokenIdentifier nmIdentifier=new NMTokenIdentifier(cId.getApplicationAttemptId(),nodeId,user,123);
currentUser.addTokenIdentifier(nmIdentifier);
currentUser.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws YarnException, IOException {
nm.getContainerManager().startContainers(allRequests);
return null;
}
}
);
List containerIds=new ArrayList();
containerIds.add(cId);
GetContainerStatusesRequest request=GetContainerStatusesRequest.newInstance(containerIds);
Container container=nm.getNMContext().getContainers().get(request.getContainerIds().get(0));
// Poll (up to 20 x 500ms) for the container to reach DONE.
final int MAX_TRIES=20;
int numTries=0;
while (!container.getContainerState().equals(ContainerState.DONE) && numTries <= MAX_TRIES) {
try {
Thread.sleep(500);
}
catch ( InterruptedException ex) {
}
numTries++;
}
Assert.assertEquals(ContainerState.DONE,container.getContainerState());
// The run must have left state behind in usercache/ and nm_private/.
Assert.assertTrue("The container should create a subDir named currentUser: " + user + "under localDir/usercache",numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.USERCACHE) > 0);
Assert.assertTrue("There should be files or Dirs under nm_private when " + "container is launched",numOfLocalDirs(nmLocalDir.getAbsolutePath(),ResourceLocalizationService.NM_PRIVATE_DIR) > 0);
// Simulate a node reboot: stop and start a fresh NM instance.
nm.stop();
nm=new MyNodeManager();
nm.start();
// Poll until the restart cleanup has emptied all three local areas.
numTries=0;
while ((numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.USERCACHE) > 0 || numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.FILECACHE) > 0 || numOfLocalDirs(nmLocalDir.getAbsolutePath(),ResourceLocalizationService.NM_PRIVATE_DIR) > 0) && numTries < MAX_TRIES) {
try {
Thread.sleep(500);
}
catch ( InterruptedException ex) {
}
numTries++;
}
Assert.assertTrue("After NM reboots, all local files should be deleted",numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.USERCACHE) == 0 && numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.FILECACHE) == 0 && numOfLocalDirs(nmLocalDir.getAbsolutePath(),ResourceLocalizationService.NM_PRIVATE_DIR) == 0);
// The cleanup must go through the deletion service with the expected
// renamed ("_DEL_") paths and per-user/per-cache deletion tasks.
verify(delService,times(1)).delete((String)isNull(),argThat(new PathInclude(ResourceLocalizationService.NM_PRIVATE_DIR + "_DEL_")));
verify(delService,times(1)).delete((String)isNull(),argThat(new PathInclude(ContainerLocalizer.FILECACHE + "_DEL_")));
verify(delService,times(1)).scheduleFileDeletionTask(argThat(new FileDeletionInclude(user,null,new String[]{destinationFile})));
verify(delService,times(1)).scheduleFileDeletionTask(argThat(new FileDeletionInclude(null,ContainerLocalizer.USERCACHE + "_DEL_",new String[]{})));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * With the minimum-RM-version requirement set to 3.0.0 and the mock RM
 * reporting exactly 3.0.0, the NodeManager must successfully reach STARTED.
 */
@Test
public void testRMVersionLessThanMinimum() throws InterruptedException {
  final AtomicInteger numCleanups = new AtomicInteger(0);
  YarnConfiguration conf = createNMConfig();
  conf.set(YarnConfiguration.NM_RESOURCEMANAGER_MINIMUM_VERSION, "3.0.0");
  nm = new NodeManager() {
    @Override
    protected NodeStatusUpdater createNodeStatusUpdater(Context context,
        Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
      MyNodeStatusUpdater myNodeStatusUpdater =
          new MyNodeStatusUpdater(context, dispatcher, healthChecker, metrics);
      // Mock RM: normal heartbeats, reporting exactly the minimum version.
      MyResourceTracker2 myResourceTracker2 = new MyResourceTracker2();
      myResourceTracker2.heartBeatNodeAction = NodeAction.NORMAL;
      myResourceTracker2.rmVersion = "3.0.0";
      myNodeStatusUpdater.resourceTracker = myResourceTracker2;
      return myNodeStatusUpdater;
    }

    @Override
    protected ContainerManagerImpl createContainerManager(Context context,
        ContainerExecutor exec, DeletionService del,
        NodeStatusUpdater nodeStatusUpdater, ApplicationACLsManager aclsManager,
        LocalDirsHandlerService dirsHandler) {
      return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater,
          metrics, aclsManager, dirsHandler) {
        // Count shutdown cleanups (shared pattern with the re-entrancy test).
        @Override
        public void cleanUpApplicationsOnNMShutDown() {
          super.cleanUpApplicationsOnNMShutDown();
          numCleanups.incrementAndGet();
        }
      };
    }
  };
  nm.init(conf);
  nm.start();
  // Poll (up to ~20s) for the NM to finish starting.
  int waitCount = 0;
  while (nm.getServiceState() != STATE.STARTED && waitCount++ != 20) {
    // FIX: this loop waits for startup; the old message incorrectly said
    // "Waiting for NM to stop..".
    LOG.info("Waiting for NM to start..");
    Thread.sleep(1000);
  }
  // FIX: assertEquals reports both values on failure, unlike assertTrue(==).
  Assert.assertEquals(STATE.STARTED, nm.getServiceState());
  nm.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A completed container must be tracked as "recently stopped" for the
 * configured duration (10s here) and then evicted by
 * removeVeryOldStoppedContainersFromCache; verifies both the eviction and
 * that it happened no sooner than the configured duration.
 */
@Test(timeout=90000) public void testRecentlyFinishedContainers() throws Exception {
NodeManager nm=new NodeManager();
YarnConfiguration conf=new YarnConfiguration();
// Track stopped containers for 10 seconds.
conf.set(NodeStatusUpdaterImpl.YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS,"10000");
nm.init(conf);
NodeStatusUpdaterImpl nodeStatusUpdater=(NodeStatusUpdaterImpl)nm.getNodeStatusUpdater();
ApplicationId appId=ApplicationId.newInstance(0,0);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0);
ContainerId cId=ContainerId.newInstance(appAttemptId,0);
nodeStatusUpdater.addCompletedContainer(cId);
Assert.assertTrue(nodeStatusUpdater.isContainerRecentlyStopped(cId));
// Trigger cache cleanup once per second (up to 15s) until evicted.
long time1=System.currentTimeMillis();
int waitInterval=15;
while (waitInterval-- > 0 && nodeStatusUpdater.isContainerRecentlyStopped(cId)) {
nodeStatusUpdater.removeVeryOldStoppedContainersFromCache();
Thread.sleep(1000);
}
long time2=System.currentTimeMillis();
Assert.assertFalse(nodeStatusUpdater.isContainerRecentlyStopped(cId));
// Eviction must not occur before the 10s tracking window elapsed; the
// generous upper bound only guards against clock anomalies.
Assert.assertTrue((time2 - time1) >= 10000 && (time2 - time1) <= 250000);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * When the RM orders SHUTDOWN via heartbeat, stopping the NM (which may be
 * triggered both by the heartbeat and by the explicit stop() call) must run
 * application cleanup exactly once.
 */
@Test
public void testStopReentrant() throws Exception {
  final AtomicInteger numCleanups = new AtomicInteger(0);
  nm = new NodeManager() {
    @Override
    protected NodeStatusUpdater createNodeStatusUpdater(Context context,
        Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
      MyNodeStatusUpdater myNodeStatusUpdater =
          new MyNodeStatusUpdater(context, dispatcher, healthChecker, metrics);
      // Mock RM tells the node to shut down on its first heartbeat.
      MyResourceTracker2 myResourceTracker2 = new MyResourceTracker2();
      myResourceTracker2.heartBeatNodeAction = NodeAction.SHUTDOWN;
      myNodeStatusUpdater.resourceTracker = myResourceTracker2;
      return myNodeStatusUpdater;
    }

    @Override
    protected ContainerManagerImpl createContainerManager(Context context,
        ContainerExecutor exec, DeletionService del,
        NodeStatusUpdater nodeStatusUpdater, ApplicationACLsManager aclsManager,
        LocalDirsHandlerService dirsHandler) {
      return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater,
          metrics, aclsManager, dirsHandler) {
        // Count every cleanup so re-entrant stops can be detected.
        @Override
        public void cleanUpApplicationsOnNMShutDown() {
          super.cleanUpApplicationsOnNMShutDown();
          numCleanups.incrementAndGet();
        }
      };
    }
  };
  YarnConfiguration conf = createNMConfig();
  nm.init(conf);
  nm.start();
  // Wait (up to ~100s) for at least one heartbeat to be processed.
  int waitCount = 0;
  while (heartBeatID < 1 && waitCount++ != 200) {
    Thread.sleep(500);
  }
  Assert.assertFalse(heartBeatID < 1);
  nm.stop();
  // Wait (up to ~20s) for the NM to reach STOPPED.
  waitCount = 0;
  while (nm.getServiceState() != STATE.STOPPED && waitCount++ != 20) {
    LOG.info("Waiting for NM to stop..");
    Thread.sleep(1000);
  }
  Assert.assertEquals(STATE.STOPPED, nm.getServiceState());
  // FIX: expected value goes first in assertEquals (was swapped, which
  // produces a misleading failure message).
  Assert.assertEquals(1, numCleanups.get());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Starts a LocalizerService on 0.0.0.0:8040, sends one heartbeat through a
 * real RPC proxy, and verifies the server answers with the expected
 * dieHBResponse() payload. The server is always stopped afterwards.
 */
@Test
public void testLocalizerRPC() throws Exception {
  InetSocketAddress locAddr = new InetSocketAddress("0.0.0.0", 8040);
  LocalizerService server = new LocalizerService(locAddr);
  try {
    server.start();
    Configuration conf = new Configuration();
    YarnRPC rpc = YarnRPC.create(conf);
    LocalizationProtocol client = (LocalizationProtocol)
        rpc.getProxy(LocalizationProtocol.class, locAddr, conf);
    LocalizerStatus status = recordFactory.newRecordInstance(LocalizerStatus.class);
    status.setLocalizerId("localizer0");
    LocalizerHeartbeatResponse response = client.heartbeat(status);
    assertEquals(dieHBResponse(), response);
  } finally {
    server.stop();
  }
  // FIX: removed trailing assertTrue(true) — it asserted nothing.
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips a LocalizerHeartbeatResponse through its delimited protobuf
 * wire form and checks equality plus the embedded resource spec on both the
 * original and the reconstructed instance.
 */
@Test(timeout=10000)
public void testLocalizerHeartbeatResponseSerDe() throws Exception {
  LocalizerHeartbeatResponse original = createLocalizerHeartbeatResponse();
  assertTrue(original instanceof LocalizerHeartbeatResponsePBImpl);
  LocalizerHeartbeatResponsePBImpl pbImpl = (LocalizerHeartbeatResponsePBImpl) original;
  // Serialize the underlying proto in delimited form...
  DataOutputBuffer outBuf = new DataOutputBuffer();
  pbImpl.getProto().writeDelimitedTo(outBuf);
  // ...then parse it back from the raw bytes.
  DataInputBuffer inBuf = new DataInputBuffer();
  inBuf.reset(outBuf.getData(), 0, outBuf.getLength());
  LocalizerHeartbeatResponseProto parsedProto =
      LocalizerHeartbeatResponseProto.parseDelimitedFrom(inBuf);
  assertNotNull(parsedProto);
  LocalizerHeartbeatResponse roundTripped =
      new LocalizerHeartbeatResponsePBImpl(parsedProto);
  assertEquals(original, roundTripped);
  assertEquals(createResource(), original.getResourceSpecs().get(0).getResource());
  assertEquals(createResource(), roundTripped.getResourceSpecs().get(0).getResource());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips exceptions through SerializedException and verifies that
 * class, message, and the full cause chain are reconstructed faithfully.
 */
@Test(timeout=10000)
public void testSerializedExceptionDeSer() throws Exception {
  // Simple exception without a cause.
  YarnException yarnEx = new YarnException("Yarn_Exception");
  SerializedException serEx = SerializedException.newInstance(yarnEx);
  Throwable throwable = serEx.deSerialize();
  Assert.assertEquals(yarnEx.getClass(), throwable.getClass());
  Assert.assertEquals(yarnEx.getMessage(), throwable.getMessage());
  // Nested cause chain: YarnException -> RuntimeException -> IOException.
  IOException ioe = new IOException("Test_IOException");
  RuntimeException runtimeException =
      new RuntimeException("Test_RuntimeException", ioe);
  YarnException yarnEx2 = new YarnException("Test_YarnException", runtimeException);
  SerializedException serEx2 = SerializedException.newInstance(yarnEx2);
  Throwable throwable2 = serEx2.deSerialize();
  // FIX: removed leftover debug printStackTrace() that polluted test output.
  Assert.assertEquals(yarnEx2.getClass(), throwable2.getClass());
  Assert.assertEquals(yarnEx2.getMessage(), throwable2.getMessage());
  Assert.assertEquals(runtimeException.getClass(), throwable2.getCause().getClass());
  Assert.assertEquals(runtimeException.getMessage(), throwable2.getCause().getMessage());
  Assert.assertEquals(ioe.getClass(), throwable2.getCause().getCause().getClass());
  Assert.assertEquals(ioe.getMessage(), throwable2.getCause().getCause().getMessage());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips a LocalizerStatus through its delimited protobuf wire form and
 * checks equality, the localizer id, and the first resource status on both
 * the original and the reconstructed instance.
 */
@Test(timeout=10000)
public void testLocalizerStatusSerDe() throws Exception {
  LocalizerStatus original = createLocalizerStatus();
  assertTrue(original instanceof LocalizerStatusPBImpl);
  LocalizerStatusPBImpl pbImpl = (LocalizerStatusPBImpl) original;
  // Serialize the underlying proto in delimited form...
  DataOutputBuffer outBuf = new DataOutputBuffer();
  pbImpl.getProto().writeDelimitedTo(outBuf);
  // ...then parse it back from the raw bytes.
  DataInputBuffer inBuf = new DataInputBuffer();
  inBuf.reset(outBuf.getData(), 0, outBuf.getLength());
  LocalizerStatusProto parsedProto = LocalizerStatusProto.parseDelimitedFrom(inBuf);
  assertNotNull(parsedProto);
  LocalizerStatus roundTripped = new LocalizerStatusPBImpl(parsedProto);
  assertEquals(original, roundTripped);
  assertEquals("localizer0", original.getLocalizerId());
  assertEquals("localizer0", roundTripped.getLocalizerId());
  assertEquals(createLocalResourceStatus(), original.getResourceStatus(0));
  assertEquals(createLocalResourceStatus(), roundTripped.getResourceStatus(0));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips a LocalResourceStatus through its delimited protobuf wire form
 * and checks equality plus the embedded resource on both the original and
 * the reconstructed instance.
 */
@Test(timeout=10000)
public void testLocalResourceStatusSerDe() throws Exception {
  LocalResourceStatus original = createLocalResourceStatus();
  assertTrue(original instanceof LocalResourceStatusPBImpl);
  LocalResourceStatusPBImpl pbImpl = (LocalResourceStatusPBImpl) original;
  // Serialize the underlying proto in delimited form...
  DataOutputBuffer outBuf = new DataOutputBuffer();
  pbImpl.getProto().writeDelimitedTo(outBuf);
  // ...then parse it back from the raw bytes.
  DataInputBuffer inBuf = new DataInputBuffer();
  inBuf.reset(outBuf.getData(), 0, outBuf.getLength());
  LocalResourceStatusProto parsedProto =
      LocalResourceStatusProto.parseDelimitedFrom(inBuf);
  assertNotNull(parsedProto);
  LocalResourceStatus roundTripped = new LocalResourceStatusPBImpl(parsedProto);
  assertEquals(original, roundTripped);
  assertEquals(createResource(), original.getResource());
  assertEquals(createResource(), roundTripped.getResource());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies AuxServices event dispatch: application stop events reach every
 * registered auxiliary service, and container init/stop events deliver the
 * container's id and resource to each service.
 */
@Test public void testAuxEventDispatch(){
// Register two lightweight aux services, Asrv and Bsrv.
Configuration conf=new Configuration();
conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,new String[]{"Asrv","Bsrv"});
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Asrv"),ServiceA.class,Service.class);
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Bsrv"),ServiceB.class,Service.class);
conf.setInt("A.expected.init",1);
conf.setInt("B.expected.stop",1);
final AuxServices aux=new AuxServices();
aux.init(conf);
aux.start();
// APPLICATION_INIT for app 65 targets Asrv with a small payload buffer.
ApplicationId appId1=ApplicationId.newInstance(0,65);
ByteBuffer buf=ByteBuffer.allocate(6);
buf.putChar('A');
buf.putInt(65);
buf.flip();
AuxServicesEvent event=new AuxServicesEvent(AuxServicesEventType.APPLICATION_INIT,"user0",appId1,"Asrv",buf);
aux.handle(event);
// APPLICATION_STOP for app 66 targets Bsrv.
ApplicationId appId2=ApplicationId.newInstance(0,66);
event=new AuxServicesEvent(AuxServicesEventType.APPLICATION_STOP,"user0",appId2,"Bsrv",null);
aux.handle(event);
Collection servs=aux.getServices();
// Every service must have seen exactly the stop of app 66, and no
// container events yet.
for ( AuxiliaryService serv : servs) {
ArrayList appIds=((LightService)serv).getAppIdsStopped();
assertEquals("app not properly stopped",1,appIds.size());
assertTrue("wrong app stopped",appIds.contains((Integer)66));
}
for ( AuxiliaryService serv : servs) {
assertNull(((LightService)serv).containerId);
assertNull(((LightService)serv).resource);
}
// CONTAINER_INIT must deliver the container's id and resource to all
// services.
ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId1,1);
ContainerTokenIdentifier cti=new ContainerTokenIdentifier(ContainerId.newInstance(attemptId,1),"","",Resource.newInstance(1,1),0,0,0,Priority.newInstance(0),0);
Container container=new ContainerImpl(null,null,null,null,null,null,cti);
ContainerId containerId=container.getContainerId();
Resource resource=container.getResource();
event=new AuxServicesEvent(AuxServicesEventType.CONTAINER_INIT,container);
aux.handle(event);
for ( AuxiliaryService serv : servs) {
assertEquals(containerId,((LightService)serv).containerId);
assertEquals(resource,((LightService)serv).resource);
((LightService)serv).containerId=null;
((LightService)serv).resource=null;
}
// CONTAINER_STOP must deliver the same id/resource again after the reset.
event=new AuxServicesEvent(AuxServicesEventType.CONTAINER_STOP,container);
aux.handle(event);
for ( AuxiliaryService serv : servs) {
assertEquals(containerId,((LightService)serv).containerId);
assertEquals(resource,((LightService)serv).resource);
}
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Full container lifecycle: localizes a script that records "Hello World!"
 * plus its pid, starts the container, verifies the process is alive, stops
 * it via StopContainersRequest, and checks the KILLED_BY_APPMASTER exit
 * status and that the process has died.
 */
@Test
public void testContainerLaunchAndStop()
    throws IOException, InterruptedException, YarnException {
  containerManager.start();
  File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
  PrintWriter fileWriter = new PrintWriter(scriptFile);
  File processStartFile = new File(tmpDir, "start_file.txt").getAbsoluteFile();
  ContainerId cId = createContainerId(0);
  // The script writes a marker line and its own pid, then blocks so the
  // container stays alive until it is stopped.
  if (Shell.WINDOWS) {
    fileWriter.println("@echo Hello World!> " + processStartFile);
    fileWriter.println("@echo " + cId + ">> " + processStartFile);
    fileWriter.println("@ping -n 100 127.0.0.1 >nul");
  } else {
    fileWriter.write("\numask 0");
    fileWriter.write("\necho Hello World! > " + processStartFile);
    fileWriter.write("\necho $$ >> " + processStartFile);
    fileWriter.write("\nexec sleep 100");
  }
  fileWriter.close();
  // Localize the script as an APPLICATION-visibility file resource.
  ContainerLaunchContext containerLaunchContext =
      recordFactory.newRecordInstance(ContainerLaunchContext.class);
  URL resource_alpha = ConverterUtils.getYarnUrlFromPath(
      localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
  LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
  rsrc_alpha.setResource(resource_alpha);
  rsrc_alpha.setSize(-1);
  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
  rsrc_alpha.setType(LocalResourceType.FILE);
  rsrc_alpha.setTimestamp(scriptFile.lastModified());
  String destinationFile = "dest_file";
  Map localResources = new HashMap();
  localResources.put(destinationFile, rsrc_alpha);
  containerLaunchContext.setLocalResources(localResources);
  List commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
  containerLaunchContext.setCommands(commands);
  StartContainerRequest scRequest = StartContainerRequest.newInstance(
      containerLaunchContext,
      createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user,
          context.getContainerTokenSecretManager()));
  List list = new ArrayList();
  list.add(scRequest);
  StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);
  // Wait up to ~20s for the launched script to write its start file.
  int timeoutSecs = 0;
  while (!processStartFile.exists() && timeoutSecs++ < 20) {
    Thread.sleep(1000);
    LOG.info("Waiting for process start-file to be created");
  }
  Assert.assertTrue("ProcessStartFile doesn't exist!", processStartFile.exists());
  // FIX: close the reader (it was leaked) and drop the duplicated
  // "Process is not alive!" assertion that appeared twice.
  String pid;
  BufferedReader reader = new BufferedReader(new FileReader(processStartFile));
  try {
    Assert.assertEquals("Hello World!", reader.readLine());
    pid = reader.readLine().trim();
    Assert.assertEquals(null, reader.readLine());
  } finally {
    reader.close();
  }
  Assert.assertTrue("Process is not alive!",
      DefaultContainerExecutor.containerIsAlive(pid));
  // Stop the container and wait for COMPLETE.
  List containerIds = new ArrayList();
  containerIds.add(cId);
  StopContainersRequest stopRequest = StopContainersRequest.newInstance(containerIds);
  containerManager.stopContainers(stopRequest);
  BaseContainerManagerTest.waitForContainerState(containerManager, cId,
      ContainerState.COMPLETE);
  GetContainerStatusesRequest gcsRequest =
      GetContainerStatusesRequest.newInstance(containerIds);
  ContainerStatus containerStatus =
      containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
  int expectedExitCode = ContainerExitStatus.KILLED_BY_APPMASTER;
  Assert.assertEquals(expectedExitCode, containerStatus.getExitStatus());
  Assert.assertFalse("Process is still alive!",
      DefaultContainerExecutor.containerIsAlive(pid));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Submits ten start-container requests where the five even-id containers
 * carry an invalid RM identifier: the odd-id containers must start and the
 * even-id ones must be rejected as allocated by a previous RM.
 */
@Test
public void testMultipleContainersLaunch() throws Exception {
  containerManager.start();
  List requests = new ArrayList();
  ContainerLaunchContext launchContext =
      recordFactory.newRecordInstance(ContainerLaunchContext.class);
  for (int i = 0; i < 10; i++) {
    ContainerId cId = createContainerId(i);
    // Even indices get a stale RM identifier and must be rejected.
    long rmIdentifier = ((i & 1) == 0)
        ? ResourceManagerConstants.RM_INVALID_IDENTIFIER
        : DUMMY_RM_IDENTIFIER;
    Token containerToken = createContainerToken(cId, rmIdentifier,
        context.getNodeId(), user, context.getContainerTokenSecretManager());
    requests.add(StartContainerRequest.newInstance(launchContext, containerToken));
  }
  StartContainersResponse response =
      containerManager.startContainers(StartContainersRequest.newInstance(requests));
  // Exactly the five odd-id containers started.
  Assert.assertEquals(5, response.getSuccessfullyStartedContainers().size());
  for (ContainerId id : response.getSuccessfullyStartedContainers()) {
    Assert.assertEquals(1, id.getId() & 1);
  }
  // The five even-id containers failed with the previous-RM rejection.
  Assert.assertEquals(5, response.getFailedRequests().size());
  for (Map.Entry entry : response.getFailedRequests().entrySet()) {
    Assert.assertEquals(0, entry.getKey().getId() & 1);
    Assert.assertTrue(entry.getValue().getMessage().contains(
        "Container " + entry.getKey() + " rejected as it is allocated by a previous RM"));
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Starts ten containers where even-indexed ones run as user "Fail" (rejected
 * by the test container manager) and odd ones as "Pass". Verifies that both
 * getContainerStatuses and stopContainers report five successes (odd ids)
 * and five failures (even ids, "Reject this container").
 */
@Test public void testMultipleContainersStopAndGetStatus() throws Exception {
  containerManager.start();
  List<StartContainerRequest> startRequest = new ArrayList<StartContainerRequest>();
  ContainerLaunchContext containerLaunchContext =
      recordFactory.newRecordInstance(ContainerLaunchContext.class);
  List<ContainerId> containerIds = new ArrayList<ContainerId>();
  for (int i = 0; i < 10; i++) {
    ContainerId cId = createContainerId(i);
    // Even-indexed containers use a user name the test manager rejects.
    String user = ((i & 1) == 0) ? "Fail" : "Pass";
    Token containerToken = createContainerToken(cId, DUMMY_RM_IDENTIFIER,
        context.getNodeId(), user, context.getContainerTokenSecretManager());
    StartContainerRequest request =
        StartContainerRequest.newInstance(containerLaunchContext, containerToken);
    startRequest.add(request);
    containerIds.add(cId);
  }
  StartContainersRequest requestList = StartContainersRequest.newInstance(startRequest);
  containerManager.startContainers(requestList);
  // Status query: odd ids succeed, even ids are rejected.
  GetContainerStatusesRequest statusRequest =
      GetContainerStatusesRequest.newInstance(containerIds);
  GetContainerStatusesResponse statusResponse =
      containerManager.getContainerStatuses(statusRequest);
  Assert.assertEquals(5, statusResponse.getContainerStatuses().size());
  for (ContainerStatus status : statusResponse.getContainerStatuses()) {
    Assert.assertEquals(1, status.getContainerId().getId() & 1);
  }
  // FIX: declare entries with type parameters; the raw Map.Entry form does
  // not compile (getKey()/getValue() would return Object).
  Assert.assertEquals(5, statusResponse.getFailedRequests().size());
  for (Map.Entry<ContainerId, SerializedException> entry :
      statusResponse.getFailedRequests().entrySet()) {
    Assert.assertEquals(0, entry.getKey().getId() & 1);
    Assert.assertTrue(entry.getValue().getMessage().contains("Reject this container"));
  }
  // Stop request: same odd/even split.
  StopContainersRequest stopRequest = StopContainersRequest.newInstance(containerIds);
  StopContainersResponse stopResponse = containerManager.stopContainers(stopRequest);
  Assert.assertEquals(5, stopResponse.getSuccessfullyStoppedContainers().size());
  for (ContainerId id : stopResponse.getSuccessfullyStoppedContainers()) {
    Assert.assertEquals(1, id.getId() & 1);
  }
  Assert.assertEquals(5, stopResponse.getFailedRequests().size());
  for (Map.Entry<ContainerId, SerializedException> entry :
      stopResponse.getFailedRequests().entrySet()) {
    Assert.assertEquals(0, entry.getKey().getId() & 1);
    Assert.assertTrue(entry.getValue().getMessage().contains("Reject this container"));
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Launches a container with one local resource and verifies that the NM
 * creates the expected usercache/appcache/nmPrivate directory layout and
 * that the resource is localized into the container directory with its
 * original content intact.
 */
@Test public void testContainerSetup() throws Exception {
  containerManager.start();
  // Create the source file that will be localized into the container.
  File dir = new File(tmpDir, "dir");
  dir.mkdirs();
  File file = new File(dir, "file");
  PrintWriter fileWriter = new PrintWriter(file);
  fileWriter.write("Hello World!");
  fileWriter.close();
  ContainerId cId = createContainerId(0);
  ContainerLaunchContext containerLaunchContext =
      recordFactory.newRecordInstance(ContainerLaunchContext.class);
  URL resource_alpha = ConverterUtils.getYarnUrlFromPath(
      localFS.makeQualified(new Path(file.getAbsolutePath())));
  LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
  rsrc_alpha.setResource(resource_alpha);
  rsrc_alpha.setSize(-1);
  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
  rsrc_alpha.setType(LocalResourceType.FILE);
  rsrc_alpha.setTimestamp(file.lastModified());
  String destinationFile = "dest_file";
  Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
  localResources.put(destinationFile, rsrc_alpha);
  containerLaunchContext.setLocalResources(localResources);
  StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext,
      createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user,
          context.getContainerTokenSecretManager()));
  List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
  list.add(scRequest);
  StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);
  BaseContainerManagerTest.waitForContainerState(containerManager, cId,
      ContainerState.COMPLETE);
  // Expected layout: <localDir>/usercache/<user>/appcache/<appId>/<containerId>
  // plus the nmPrivate mirror for system files.
  ApplicationId appId = cId.getApplicationAttemptId().getApplicationId();
  String appIDStr = ConverterUtils.toString(appId);
  String containerIDStr = ConverterUtils.toString(cId);
  File userCacheDir = new File(localDir, ContainerLocalizer.USERCACHE);
  File userDir = new File(userCacheDir, user);
  File appCache = new File(userDir, ContainerLocalizer.APPCACHE);
  File appDir = new File(appCache, appIDStr);
  File containerDir = new File(appDir, containerIDStr);
  File targetFile = new File(containerDir, destinationFile);
  File sysDir = new File(localDir, ResourceLocalizationService.NM_PRIVATE_DIR);
  File appSysDir = new File(sysDir, appIDStr);
  File containerSysDir = new File(appSysDir, containerIDStr);
  for (File f : new File[]{localDir, sysDir, userCacheDir, appDir, appSysDir,
      containerDir, containerSysDir}) {
    Assert.assertTrue(f.getAbsolutePath() + " doesn't exist!!", f.exists());
    Assert.assertTrue(f.getAbsolutePath() + " is not a directory!!", f.isDirectory());
  }
  Assert.assertTrue(targetFile.getAbsolutePath() + " doesn't exist!!", targetFile.exists());
  // FIX: the reader was never closed, leaking a file handle for the rest of
  // the test JVM's life; close it in a finally block.
  BufferedReader reader = new BufferedReader(new FileReader(targetFile));
  try {
    Assert.assertEquals("Hello World!", reader.readLine());
    Assert.assertEquals(null, reader.readLine());
  } finally {
    reader.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Starting a container whose launch context supplies service data for an
 * aux service that was never configured must fail, and the failure message
 * must name the missing service.
 */
@Test public void testStartContainerFailureWithUnknownAuxService() throws Exception {
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[]{"existService"});
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "existService"),
      ServiceA.class, Service.class);
  containerManager.start();
  List<StartContainerRequest> startRequest = new ArrayList<StartContainerRequest>();
  ContainerLaunchContext containerLaunchContext =
      recordFactory.newRecordInstance(ContainerLaunchContext.class);
  // Reference an aux service name that is NOT in the configuration above.
  Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
  String serviceName = "non_exist_auxService";
  serviceData.put(serviceName, ByteBuffer.wrap(serviceName.getBytes()));
  containerLaunchContext.setServiceData(serviceData);
  ContainerId cId = createContainerId(0);
  String user = "start_container_fail";
  Token containerToken = createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(),
      user, context.getContainerTokenSecretManager());
  StartContainerRequest request =
      StartContainerRequest.newInstance(containerLaunchContext, containerToken);
  startRequest.add(request);
  StartContainersRequest requestList = StartContainersRequest.newInstance(startRequest);
  StartContainersResponse response = containerManager.startContainers(requestList);
  // FIX: use assertEquals instead of assertTrue(size() == n) so a failure
  // reports the actual vs expected count rather than a bare "expected true".
  Assert.assertEquals(1, response.getFailedRequests().size());
  Assert.assertEquals(0, response.getSuccessfullyStartedContainers().size());
  Assert.assertTrue(response.getFailedRequests().containsKey(cId));
  Assert.assertTrue(response.getFailedRequests().get(cId).getMessage()
      .contains("The auxService:" + serviceName + " does not exist"));
}
APIUtilityVerifier BranchVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * After startup the NM context should advertise the canonical host name,
 * and asking for the status of a container that was never started must
 * surface an exception.
 */
@Test public void testContainerManagerInitialization() throws IOException {
  containerManager.start();
  InetAddress localAddr = InetAddress.getLocalHost();
  String fqdn = localAddr.getCanonicalHostName();
  // Only meaningful when reverse DNS produced a real name rather than the IP.
  if (!localAddr.getHostAddress().equals(fqdn)) {
    Assert.assertEquals(fqdn, context.getNodeId().getHost());
  }
  boolean threw = false;
  try {
    ContainerId unknownId = createContainerId(0);
    List ids = new ArrayList();
    ids.add(unknownId);
    GetContainerStatusesRequest statusRequest =
        GetContainerStatusesRequest.newInstance(ids);
    GetContainerStatusesResponse statusResponse =
        containerManager.getContainerStatuses(statusRequest);
    // The unknown container should land in the failed-requests map; rethrow
    // its serialized exception so the catch below records it.
    if (statusResponse.getFailedRequests().containsKey(unknownId)) {
      throw statusResponse.getFailedRequests().get(unknownId).deSerialize();
    }
  } catch (Throwable t) {
    threw = true;
  }
  Assert.assertTrue(threw);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies local-file cleanup: after a container completes, its per-container
 * directories (appcache and nmPrivate) are removed while the application
 * directories survive; once the application finishes, the app directories are
 * removed too. Uses a real DeletionService so deletions actually happen.
 *
 * @throws InterruptedException if interrupted while polling for deletion
 * @throws IOException on file-system errors
 * @throws YarnException on container-manager API errors
 */
@Test public void testLocalFilesCleanup() throws InterruptedException, IOException, YarnException {
// Rebuild the container manager around a real DeletionService (the default
// test setup presumably stubs deletion out -- this test needs real deletes).
delSrvc=new DeletionService(exec);
delSrvc.init(conf);
containerManager=createContainerManager(delSrvc);
containerManager.init(conf);
containerManager.start();
// Source file to be localized into the container.
File dir=new File(tmpDir,"dir");
dir.mkdirs();
File file=new File(dir,"file");
PrintWriter fileWriter=new PrintWriter(file);
fileWriter.write("Hello World!");
fileWriter.close();
ContainerId cId=createContainerId(0);
ApplicationId appId=cId.getApplicationAttemptId().getApplicationId();
ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class);
URL resource_alpha=ConverterUtils.getYarnUrlFromPath(FileContext.getLocalFSFileContext().makeQualified(new Path(file.getAbsolutePath())));
LocalResource rsrc_alpha=recordFactory.newRecordInstance(LocalResource.class);
rsrc_alpha.setResource(resource_alpha);
rsrc_alpha.setSize(-1);
rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
rsrc_alpha.setType(LocalResourceType.FILE);
rsrc_alpha.setTimestamp(file.lastModified());
String destinationFile="dest_file";
// NOTE(review): generic type parameters appear stripped on these raw
// declarations (Map/List); restore from version control if recompiling.
Map localResources=new HashMap();
localResources.put(destinationFile,rsrc_alpha);
containerLaunchContext.setLocalResources(localResources);
StartContainerRequest scRequest=StartContainerRequest.newInstance(containerLaunchContext,createContainerToken(cId,DUMMY_RM_IDENTIFIER,context.getNodeId(),user,context.getContainerTokenSecretManager()));
List list=new ArrayList();
list.add(scRequest);
StartContainersRequest allRequests=StartContainersRequest.newInstance(list);
containerManager.startContainers(allRequests);
// Wait until the container is done but the application is still running.
BaseContainerManagerTest.waitForContainerState(containerManager,cId,ContainerState.COMPLETE);
BaseContainerManagerTest.waitForApplicationState(containerManager,cId.getApplicationAttemptId().getApplicationId(),ApplicationState.RUNNING);
// Compute the expected on-disk layout for this app/container.
String appIDStr=ConverterUtils.toString(appId);
String containerIDStr=ConverterUtils.toString(cId);
File userCacheDir=new File(localDir,ContainerLocalizer.USERCACHE);
File userDir=new File(userCacheDir,user);
File appCache=new File(userDir,ContainerLocalizer.APPCACHE);
File appDir=new File(appCache,appIDStr);
File containerDir=new File(appDir,containerIDStr);
File targetFile=new File(containerDir,destinationFile);
File sysDir=new File(localDir,ResourceLocalizationService.NM_PRIVATE_DIR);
File appSysDir=new File(sysDir,appIDStr);
File containerSysDir=new File(appSysDir,containerIDStr);
// App-level dirs must still exist; container-level dirs must be gone.
Assert.assertTrue("AppDir " + appDir.getAbsolutePath() + " doesn't exist!!",appDir.exists());
Assert.assertTrue("AppSysDir " + appSysDir.getAbsolutePath() + " doesn't exist!!",appSysDir.exists());
for ( File f : new File[]{containerDir,containerSysDir}) {
Assert.assertFalse(f.getAbsolutePath() + " exists!!",f.exists());
}
Assert.assertFalse(targetFile.getAbsolutePath() + " exists!!",targetFile.exists());
// Finish the application and verify the app-level dirs are cleaned as well.
containerManager.handle(new CMgrCompletedAppsEvent(Arrays.asList(new ApplicationId[]{appId}),CMgrCompletedAppsEvent.Reason.ON_SHUTDOWN));
BaseContainerManagerTest.waitForApplicationState(containerManager,cId.getApplicationAttemptId().getApplicationId(),ApplicationState.FINISHED);
for ( File f : new File[]{appDir,containerDir,appSysDir,containerSysDir}) {
// Deletion is asynchronous; poll for up to ~15 seconds per path.
int timeout=0;
while (f.exists() && timeout++ < 15) {
Thread.sleep(1000);
}
Assert.assertFalse(f.getAbsolutePath() + " exists!!",f.exists());
}
int timeout=0;
while (targetFile.exists() && timeout++ < 15) {
Thread.sleep(1000);
}
Assert.assertFalse(targetFile.getAbsolutePath() + " exists!!",targetFile.exists());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises NM work-preserving restart for application state: with recovery
 * enabled and an in-memory state store, the container manager is stopped and
 * recreated three times, and after each restart the application (including
 * its ACLs) must be recovered in the state it was left in -- INITING, then
 * APPLICATION_RESOURCES_CLEANINGUP, and finally gone once the app finished
 * and log handling completed before the last restart.
 */
@Test public void testApplicationRecovery() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true);
conf.set(YarnConfiguration.NM_ADDRESS,"localhost:1234");
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE,true);
conf.set(YarnConfiguration.YARN_ADMIN_ACL,"yarn_admin_user");
// The state store outlives every container-manager incarnation below;
// it is what carries application state across the simulated restarts.
NMStateStoreService stateStore=new NMMemoryStateStoreService();
stateStore.init(conf);
stateStore.start();
Context context=new NMContext(new NMContainerTokenSecretManager(conf),new NMTokenSecretManagerInNM(),null,new ApplicationACLsManager(conf),stateStore);
ContainerManagerImpl cm=createContainerManager(context);
cm.init(conf);
cm.start();
// Install a master key so container/NM tokens can be validated.
MasterKey masterKey=new MasterKeyPBImpl();
masterKey.setKeyId(123);
masterKey.setBytes(ByteBuffer.wrap(new byte[]{new Integer(123).byteValue()}));
context.getContainerTokenSecretManager().setMasterKey(masterKey);
context.getNMTokenSecretManager().setMasterKey(masterKey);
// Distinct users to probe each ACL path: owner, modify-only, view-only,
// and one with no access at all.
String appUser="app_user1";
String modUser="modify_user1";
String viewUser="view_user1";
String enemyUser="enemy_user";
ApplicationId appId=ApplicationId.newInstance(0,1);
ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
ContainerId cid=ContainerId.newInstance(attemptId,1);
// NOTE(review): generic parameters appear stripped on these raw
// Map/List declarations; restore from version control if recompiling.
Map localResources=Collections.emptyMap();
Map containerEnv=Collections.emptyMap();
List containerCmds=Collections.emptyList();
Map serviceData=Collections.emptyMap();
Credentials containerCreds=new Credentials();
DataOutputBuffer dob=new DataOutputBuffer();
containerCreds.writeTokenStorageToStream(dob);
ByteBuffer containerTokens=ByteBuffer.wrap(dob.getData(),0,dob.getLength());
Map acls=new HashMap();
acls.put(ApplicationAccessType.MODIFY_APP,modUser);
acls.put(ApplicationAccessType.VIEW_APP,viewUser);
ContainerLaunchContext clc=ContainerLaunchContext.newInstance(localResources,containerEnv,containerCmds,serviceData,containerTokens,acls);
// Start one container so the application is created and persisted.
StartContainersResponse startResponse=startContainer(context,cm,cid,clc);
assertTrue(startResponse.getFailedRequests().isEmpty());
assertEquals(1,context.getApplications().size());
Application app=context.getApplications().get(appId);
assertNotNull(app);
waitForAppState(app,ApplicationState.INITING);
assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(modUser),ApplicationAccessType.MODIFY_APP,appUser,appId));
assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.MODIFY_APP,appUser,appId));
assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.VIEW_APP,appUser,appId));
assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(enemyUser),ApplicationAccessType.VIEW_APP,appUser,appId));
// Restart #1: the app must be recovered in INITING with identical ACLs.
cm.stop();
context=new NMContext(new NMContainerTokenSecretManager(conf),new NMTokenSecretManagerInNM(),null,new ApplicationACLsManager(conf),stateStore);
cm=createContainerManager(context);
cm.init(conf);
cm.start();
assertEquals(1,context.getApplications().size());
app=context.getApplications().get(appId);
assertNotNull(app);
waitForAppState(app,ApplicationState.INITING);
assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(modUser),ApplicationAccessType.MODIFY_APP,appUser,appId));
assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.MODIFY_APP,appUser,appId));
assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.VIEW_APP,appUser,appId));
assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(enemyUser),ApplicationAccessType.VIEW_APP,appUser,appId));
// Tell the NM the RM finished the app, then restart mid-cleanup.
List finishedApps=new ArrayList();
finishedApps.add(appId);
cm.handle(new CMgrCompletedAppsEvent(finishedApps,CMgrCompletedAppsEvent.Reason.BY_RESOURCEMANAGER));
waitForAppState(app,ApplicationState.APPLICATION_RESOURCES_CLEANINGUP);
cm.stop();
// Restart #2: the app must be recovered in the cleaning-up state.
context=new NMContext(new NMContainerTokenSecretManager(conf),new NMTokenSecretManagerInNM(),null,new ApplicationACLsManager(conf),stateStore);
cm=createContainerManager(context);
cm.init(conf);
cm.start();
assertEquals(1,context.getApplications().size());
app=context.getApplications().get(appId);
assertNotNull(app);
waitForAppState(app,ApplicationState.APPLICATION_RESOURCES_CLEANINGUP);
assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(modUser),ApplicationAccessType.MODIFY_APP,appUser,appId));
assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.MODIFY_APP,appUser,appId));
assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.VIEW_APP,appUser,appId));
assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(enemyUser),ApplicationAccessType.VIEW_APP,appUser,appId));
// Drive the app to FINISHED and complete log handling, then restart once
// more: the finished app must no longer be recovered.
app.handle(new ApplicationEvent(app.getAppId(),ApplicationEventType.APPLICATION_RESOURCES_CLEANEDUP));
assertEquals(app.getApplicationState(),ApplicationState.FINISHED);
app.handle(new ApplicationEvent(app.getAppId(),ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED));
cm.stop();
context=new NMContext(new NMContainerTokenSecretManager(conf),new NMTokenSecretManagerInNM(),null,new ApplicationACLsManager(conf),stateStore);
cm=createContainerManager(context);
cm.init(conf);
cm.start();
assertTrue(context.getApplications().isEmpty());
cm.stop();
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * An INIT event arriving after the container reached DONE must be ignored:
 * the state stays DONE and no localized resources reappear.
 */
@Test @SuppressWarnings("unchecked") public void testInitWhileDone() throws Exception {
  WrappedContainer container = null;
  try {
    container = new WrappedContainer(6, 314159265358979L, 4344, "yak");
    container.initContainer();
    container.localizeResources();
    container.launchContainer();
    // Drop the localization interactions recorded so far; only the cleanup
    // call issued after completion matters below.
    reset(container.localizerBus);
    container.containerSuccessful();
    container.containerResourcesCleanup();
    assertEquals(ContainerState.DONE, container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    // A late init must be a no-op on a DONE container.
    container.initContainer();
    assertEquals(ContainerState.DONE, container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    verifyCleanupCall(container);
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Killing a container that already exited with failure must leave it in
 * EXITED_WITH_FAILURE and still trigger resource cleanup.
 */
@Test public void testKillOnLocalizedWhenContainerLaunched() throws Exception {
  WrappedContainer container = null;
  try {
    container = new WrappedContainer(17, 314159265358979L, 4344, "yak");
    container.initContainer();
    container.localizeResources();
    assertEquals(ContainerState.LOCALIZED, container.c.getContainerState());
    // Run the launcher directly so the container transitions to a failed exit.
    ContainerLaunch containerLaunch =
        container.launcher.running.get(container.c.getContainerId());
    containerLaunch.call();
    container.drainDispatcherEvents();
    assertEquals(ContainerState.EXITED_WITH_FAILURE, container.c.getContainerState());
    // A kill after the failure must not change the terminal state.
    container.killContainer();
    assertEquals(ContainerState.EXITED_WITH_FAILURE, container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    verifyCleanupCall(container);
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Killing a LOCALIZED container before its launcher runs must move it
 * through KILLING to CONTAINER_CLEANEDUP_AFTER_KILL, clean its resources,
 * and leave the running-containers metric at zero.
 */
@Test public void testKillOnLocalizedWhenContainerNotLaunched() throws Exception {
  WrappedContainer container = null;
  try {
    container = new WrappedContainer(17, 314159265358979L, 4344, "yak");
    container.initContainer();
    container.localizeResources();
    assertEquals(ContainerState.LOCALIZED, container.c.getContainerState());
    ContainerLaunch containerLaunch =
        container.launcher.running.get(container.c.getContainerId());
    // Kill first, then let the (now moot) launch attempt run.
    container.killContainer();
    assertEquals(ContainerState.KILLING, container.c.getContainerState());
    containerLaunch.call();
    container.drainDispatcherEvents();
    assertEquals(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
        container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    verifyCleanupCall(container);
    // Completing resource cleanup retires the container from the metrics.
    container.c.handle(new ContainerEvent(container.c.getContainerId(),
        ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP));
    assertEquals(0, metrics.getRunningContainers());
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * A kill request against a running container must enter KILLING, drop the
 * localized resources, and issue a resource-cleanup call once the process
 * is confirmed killed.
 */
@Test @SuppressWarnings("unchecked") public void testCleanupOnKillRequest() throws Exception {
  WrappedContainer container = null;
  try {
    container = new WrappedContainer(12, 314159265358979L, 4344, "yak");
    container.initContainer();
    container.localizeResources();
    container.launchContainer();
    // Forget the localization traffic from setup; verify only cleanup below.
    reset(container.localizerBus);
    container.killContainer();
    assertEquals(ContainerState.KILLING, container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    container.containerKilledOnRequest();
    verifyCleanupCall(container);
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * A container that exits successfully must end up in EXITED_WITH_SUCCESS
 * with its localized resources released and a cleanup call issued.
 */
@Test @SuppressWarnings("unchecked") public void testCleanupOnSuccess() throws Exception {
  WrappedContainer container = null;
  try {
    container = new WrappedContainer(11, 314159265358979L, 4344, "yak");
    container.initContainer();
    container.localizeResources();
    container.launchContainer();
    // Clear recorded localization interactions before the completion path.
    reset(container.localizerBus);
    container.containerSuccessful();
    assertEquals(ContainerState.EXITED_WITH_SUCCESS, container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    verifyCleanupCall(container);
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * A launch event arriving after a kill request must not resurrect the
 * container: it stays in KILLING until the kill completes and cleanup runs.
 */
@Test public void testLaunchAfterKillRequest() throws Exception {
  WrappedContainer container = null;
  try {
    container = new WrappedContainer(14, 314159265358979L, 4344, "yak");
    container.initContainer();
    container.localizeResources();
    container.killContainer();
    assertEquals(ContainerState.KILLING, container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    // The late launch must be ignored while the kill is in flight.
    container.launchContainer();
    assertEquals(ContainerState.KILLING, container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    container.containerKilledOnRequest();
    verifyCleanupCall(container);
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * A container process that dies with a non-zero (force-killed) exit code
 * must land in EXITED_WITH_FAILURE with resources released and cleaned up.
 */
@Test @SuppressWarnings("unchecked") public void testCleanupOnFailure() throws Exception {
  WrappedContainer container = null;
  try {
    container = new WrappedContainer(10, 314159265358979L, 4344, "yak");
    container.initContainer();
    container.localizeResources();
    container.launchContainer();
    // Reset recorded interactions so only the post-failure cleanup is checked.
    reset(container.localizerBus);
    container.containerFailed(ExitCode.FORCE_KILLED.getExitCode());
    assertEquals(ContainerState.EXITED_WITH_FAILURE, container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    verifyCleanupCall(container);
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * A running container killed from outside (no preceding kill request) must
 * transition to EXITED_WITH_FAILURE, release its resources, and be cleaned up.
 */
@Test @SuppressWarnings("unchecked") public void testExternalKill() throws Exception {
  WrappedContainer container = null;
  try {
    container = new WrappedContainer(13, 314159265358979L, 4344, "yak");
    container.initContainer();
    container.localizeResources();
    container.launchContainer();
    // Discard setup-phase localizer interactions before verifying cleanup.
    reset(container.localizerBus);
    container.containerKilledOnRequest();
    assertEquals(ContainerState.EXITED_WITH_FAILURE, container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    verifyCleanupCall(container);
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * A resource-localization-failure event delivered after the container is
 * already DONE must be ignored: the state stays DONE and no resources return.
 */
@Test @SuppressWarnings("unchecked") public void testLocalizationFailureAtDone() throws Exception {
  WrappedContainer container = null;
  try {
    container = new WrappedContainer(6, 314159265358979L, 4344, "yak");
    container.initContainer();
    container.localizeResources();
    container.launchContainer();
    // Only the cleanup interaction after completion is of interest.
    reset(container.localizerBus);
    container.containerSuccessful();
    container.containerResourcesCleanup();
    assertEquals(ContainerState.DONE, container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    // Late localization failure: must be a no-op on a DONE container.
    container.resourceFailedContainer();
    assertEquals(ContainerState.DONE, container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    verifyCleanupCall(container);
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verify container launch when all resources already cached: the container
 * moves NEW -> LOCALIZED, its localized-resource map matches exactly the
 * paths reported by localization, and a launch event for this container is
 * posted on the launcher bus.
 */
@Test public void testLocalizationLaunch() throws Exception {
WrappedContainer wc=null;
try {
wc=new WrappedContainer(8,314159265358979L,4344,"yak");
assertEquals(ContainerState.NEW,wc.c.getContainerState());
wc.initContainer();
// NOTE(review): generic type parameters look stripped here and in the loop
// below (e.g. `Map>`); restore from version control before recompiling.
Map> localPaths=wc.localizeResources();
assertEquals(ContainerState.LOCALIZED,wc.c.getContainerState());
assertNotNull(wc.c.getLocalizedResources());
// Every localized resource must correspond to exactly one reported path...
for ( Entry> loc : wc.c.getLocalizedResources().entrySet()) {
assertEquals(localPaths.remove(loc.getKey()),loc.getValue());
}
// ...and no reported path may be left unmatched.
assertTrue(localPaths.isEmpty());
final WrappedContainer wcf=wc;
// Matcher: accept only the launcher event that carries this very container.
ArgumentMatcher matchesContainerLaunch=new ArgumentMatcher(){
@Override public boolean matches( Object o){
ContainersLauncherEvent launchEvent=(ContainersLauncherEvent)o;
return wcf.c == launchEvent.getContainer();
}
}
;
verify(wc.launcherBus).handle(argThat(matchesContainerLaunch));
}
finally {
if (wc != null) {
wc.finished();
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testSpecialCharSymlinks() throws IOException {
File shellFile=null;
File tempFile=null;
String badSymlink=Shell.WINDOWS ? "foo@zz_#!-+bar.cmd" : "foo@zz%_#*&!-+= bar()";
File symLinkFile=null;
try {
shellFile=Shell.appendScriptExtension(tmpDir,"hello");
tempFile=Shell.appendScriptExtension(tmpDir,"temp");
String timeoutCommand=Shell.WINDOWS ? "@echo \"hello\"" : "echo \"hello\"";
PrintWriter writer=new PrintWriter(new FileOutputStream(shellFile));
FileUtil.setExecutable(shellFile,true);
writer.println(timeoutCommand);
writer.close();
Map> resources=new HashMap>();
Path path=new Path(shellFile.getAbsolutePath());
resources.put(path,Arrays.asList(badSymlink));
FileOutputStream fos=new FileOutputStream(tempFile);
Map env=new HashMap();
List commands=new ArrayList();
if (Shell.WINDOWS) {
commands.add("cmd");
commands.add("/c");
commands.add("\"" + badSymlink + "\"");
}
else {
commands.add("/bin/sh ./\\\"" + badSymlink + "\\\"");
}
ContainerLaunch.writeLaunchEnv(fos,env,resources,commands);
fos.flush();
fos.close();
FileUtil.setExecutable(tempFile,true);
Shell.ShellCommandExecutor shexc=new Shell.ShellCommandExecutor(new String[]{tempFile.getAbsolutePath()},tmpDir);
shexc.execute();
assertEquals(shexc.getExitCode(),0);
assert (shexc.getOutput().contains("hello"));
symLinkFile=new File(tmpDir,badSymlink);
}
finally {
if (shellFile != null && shellFile.exists()) {
shellFile.delete();
}
if (tempFile != null && tempFile.exists()) {
tempFile.delete();
}
if (symLinkFile != null && symLinkFile.exists()) {
symLinkFile.delete();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Writes a launch script with an environment value containing embedded
 * newlines (invalid shell syntax) and verifies that executing it fails and
 * that the diagnostics carry the interpreter's "command not found" (or the
 * Windows equivalent) message.
 *
 * @throws IOException on file-system or shell-execution errors
 */
@Test(timeout=20000) public void testInvalidEnvSyntaxDiagnostics() throws IOException {
File shellFile=null;
try {
shellFile=Shell.appendScriptExtension(tmpDir,"hello");
// NOTE(review): generic type parameters look stripped on these raw
// declarations (`Map>`); restore from version control if recompiling.
Map> resources=new HashMap>();
FileOutputStream fos=new FileOutputStream(shellFile);
FileUtil.setExecutable(shellFile,true);
Map env=new HashMap();
// The multi-line value is the deliberate poison: newlines inside an env
// assignment break the generated script's syntax.
env.put("APPLICATION_WORKFLOW_CONTEXT","{\"workflowId\":\"609f91c5cd83\"," + "\"workflowName\":\"\n\ninsert table " + "\npartition (cd_education_status)\nselect cd_demo_sk, cd_gender, ");
List commands=new ArrayList();
ContainerLaunch.writeLaunchEnv(fos,env,resources,commands);
fos.flush();
fos.close();
// Force LANG=C so the shell's error message is in English and matchable.
Map cmdEnv=new HashMap();
cmdEnv.put("LANG","C");
Shell.ShellCommandExecutor shexc=new Shell.ShellCommandExecutor(new String[]{shellFile.getAbsolutePath()},tmpDir,cmdEnv);
String diagnostics=null;
try {
shexc.execute();
Assert.fail("Should catch exception");
}
catch ( ExitCodeException e) {
diagnostics=e.getMessage();
}
Assert.assertTrue(diagnostics.contains(Shell.WINDOWS ? "is not recognized as an internal or external command" : "command not found"));
Assert.assertTrue(shexc.getExitCode() != 0);
}
finally {
if (shellFile != null && shellFile.exists()) {
shellFile.delete();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Registers a resource path that does not exist, so the launch script's
 * symlink creation fails; verifies the script exits non-zero and that an
 * error diagnostic is reported.
 *
 * @throws IOException on file-system or shell-execution errors
 */
@Test(timeout=20000) public void testInvalidSymlinkDiagnostics() throws IOException {
File shellFile=null;
File tempFile=null;
String symLink=Shell.WINDOWS ? "test.cmd" : "test";
File symLinkFile=null;
try {
shellFile=Shell.appendScriptExtension(tmpDir,"hello");
tempFile=Shell.appendScriptExtension(tmpDir,"temp");
String timeoutCommand=Shell.WINDOWS ? "@echo \"hello\"" : "echo \"hello\"";
PrintWriter writer=new PrintWriter(new FileOutputStream(shellFile));
FileUtil.setExecutable(shellFile,true);
writer.println(timeoutCommand);
writer.close();
// NOTE(review): generic type parameters look stripped here (`Map>`);
// restore from version control if recompiling.
Map> resources=new HashMap>();
// Deliberately point the symlink source at a path that does not exist.
Path invalidPath=new Path(shellFile.getAbsolutePath() + "randomPath");
resources.put(invalidPath,Arrays.asList(symLink));
FileOutputStream fos=new FileOutputStream(tempFile);
Map env=new HashMap();
List commands=new ArrayList();
if (Shell.WINDOWS) {
commands.add("cmd");
commands.add("/c");
commands.add("\"" + symLink + "\"");
}
else {
commands.add("/bin/sh ./\\\"" + symLink + "\\\"");
}
ContainerLaunch.writeLaunchEnv(fos,env,resources,commands);
fos.flush();
fos.close();
FileUtil.setExecutable(tempFile,true);
Shell.ShellCommandExecutor shexc=new Shell.ShellCommandExecutor(new String[]{tempFile.getAbsolutePath()},tmpDir);
String diagnostics=null;
try {
shexc.execute();
Assert.fail("Should catch exception");
}
catch ( ExitCodeException e) {
diagnostics=e.getMessage();
}
Assert.assertNotNull(diagnostics);
Assert.assertTrue(shexc.getExitCode() != 0);
symLinkFile=new File(tmpDir,symLink);
}
finally {
// Best-effort cleanup of every file this test may have created.
if (shellFile != null && shellFile.exists()) {
shellFile.delete();
}
if (tempFile != null && tempFile.exists()) {
tempFile.delete();
}
if (symLinkFile != null && symLinkFile.exists()) {
symLinkFile.delete();
}
}
}
APIUtilityVerifier BranchVerifier EqualityVerifier
/**
 * expandEnvironment must rewrite cross-platform environment markers and the
 * log-dir expansion variable into platform-native syntax (%VAR% and ';' on
 * Windows, $VAR and ':' elsewhere).
 */
@Test(timeout=10000) public void testEnvExpansion() throws IOException {
  Path logPath = new Path("/nm/container/logs");
  String sep = ApplicationConstants.CLASS_PATH_SEPARATOR;
  String input = Apps.crossPlatformify("HADOOP_HOME") + "/share/hadoop/common/*" + sep
      + Apps.crossPlatformify("HADOOP_HOME") + "/share/hadoop/common/lib/*" + sep
      + Apps.crossPlatformify("HADOOP_LOG_HOME") + ApplicationConstants.LOG_DIR_EXPANSION_VAR;
  String res = ContainerLaunch.expandEnvironment(input, logPath);
  // Expected form differs only in variable syntax and path separator.
  String expected = Shell.WINDOWS
      ? "%HADOOP_HOME%/share/hadoop/common/*;"
          + "%HADOOP_HOME%/share/hadoop/common/lib/*;"
          + "%HADOOP_LOG_HOME%/nm/container/logs"
      : "$HADOOP_HOME/share/hadoop/common/*:"
          + "$HADOOP_HOME/share/hadoop/common/lib/*:"
          + "$HADOOP_LOG_HOME/nm/container/logs";
  Assert.assertEquals(expected, res);
  System.out.println(res);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Runs a launch script that writes to stdout, writes to stderr, and exits
 * with code 2; verifies the stderr text surfaces in the diagnostics, the
 * stdout text in the executor output, and the exit code is preserved.
 *
 * @throws IOException on file-system or shell-execution errors
 */
@Test(timeout=20000) public void testContainerLaunchStdoutAndStderrDiagnostics() throws IOException {
File shellFile=null;
try {
shellFile=Shell.appendScriptExtension(tmpDir,"hello");
// Emits "hello" on stdout, "error" on stderr, then exits with status 2.
String command=Shell.WINDOWS ? "@echo \"hello\" & @echo \"error\" 1>&2 & exit /b 2" : "echo \"hello\"; echo \"error\" 1>&2; exit 2;";
PrintWriter writer=new PrintWriter(new FileOutputStream(shellFile));
FileUtil.setExecutable(shellFile,true);
writer.println(command);
writer.close();
// NOTE(review): generic type parameters look stripped on these raw
// declarations (`Map>`); restore from version control if recompiling.
Map> resources=new HashMap>();
// Append the generated launch env to the same script file.
FileOutputStream fos=new FileOutputStream(shellFile,true);
Map env=new HashMap();
List commands=new ArrayList();
commands.add(command);
ContainerLaunch.writeLaunchEnv(fos,env,resources,commands);
fos.flush();
fos.close();
Shell.ShellCommandExecutor shexc=new Shell.ShellCommandExecutor(new String[]{shellFile.getAbsolutePath()},tmpDir);
String diagnostics=null;
try {
shexc.execute();
Assert.fail("Should catch exception");
}
catch ( ExitCodeException e) {
diagnostics=e.getMessage();
}
Assert.assertTrue(diagnostics.contains("error"));
Assert.assertTrue(shexc.getOutput().contains("hello"));
Assert.assertTrue(shexc.getExitCode() == 2);
}
finally {
if (shellFile != null && shellFile.exists()) {
shellFile.delete();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that environment variables set by the user are NOT forwarded for
 * NM-controlled keys (CONTAINER_ID, NM_HOST, ...) — sanitizeEnv must replace
 * them with the NodeManager's own values — by launching a container whose
 * script dumps its environment to a file and then checking both the file
 * contents and the sanitized launch context.
 *
 * @throws Exception on any container-manager or I/O failure
 */
@Test(timeout=60000) public void testContainerEnvVariables() throws Exception {
  containerManager.start();
  ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class);
  ApplicationId appId=ApplicationId.newInstance(0,0);
  ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1);
  ContainerId cId=ContainerId.newInstance(appAttemptId,0);
  // Deliberately set user values for every NM-controlled variable; sanitizeEnv
  // is expected to overwrite all of them.
  Map userSetEnv=new HashMap();
  userSetEnv.put(Environment.CONTAINER_ID.name(),"user_set_container_id");
  userSetEnv.put(Environment.NM_HOST.name(),"user_set_NM_HOST");
  userSetEnv.put(Environment.NM_PORT.name(),"user_set_NM_PORT");
  userSetEnv.put(Environment.NM_HTTP_PORT.name(),"user_set_NM_HTTP_PORT");
  userSetEnv.put(Environment.LOCAL_DIRS.name(),"user_set_LOCAL_DIR");
  userSetEnv.put(Environment.USER.key(),"user_set_" + Environment.USER.key());
  userSetEnv.put(Environment.LOGNAME.name(),"user_set_LOGNAME");
  userSetEnv.put(Environment.PWD.name(),"user_set_PWD");
  userSetEnv.put(Environment.HOME.name(),"user_set_HOME");
  containerLaunchContext.setEnvironment(userSetEnv);
  // Build a platform-specific script that echoes each variable (one per line)
  // into processStartFile, then its own pid/container id, then blocks so the
  // container stays alive until explicitly stopped.
  File scriptFile=Shell.appendScriptExtension(tmpDir,"scriptFile");
  PrintWriter fileWriter=new PrintWriter(scriptFile);
  File processStartFile=new File(tmpDir,"env_vars.txt").getAbsoluteFile();
  if (Shell.WINDOWS) {
    fileWriter.println("@echo " + Environment.CONTAINER_ID.$() + "> "+ processStartFile);
    fileWriter.println("@echo " + Environment.NM_HOST.$() + ">> "+ processStartFile);
    fileWriter.println("@echo " + Environment.NM_PORT.$() + ">> "+ processStartFile);
    fileWriter.println("@echo " + Environment.NM_HTTP_PORT.$() + ">> "+ processStartFile);
    fileWriter.println("@echo " + Environment.LOCAL_DIRS.$() + ">> "+ processStartFile);
    fileWriter.println("@echo " + Environment.USER.$() + ">> "+ processStartFile);
    fileWriter.println("@echo " + Environment.LOGNAME.$() + ">> "+ processStartFile);
    fileWriter.println("@echo " + Environment.PWD.$() + ">> "+ processStartFile);
    fileWriter.println("@echo " + Environment.HOME.$() + ">> "+ processStartFile);
    for ( String serviceName : containerManager.getAuxServiceMetaData().keySet()) {
      fileWriter.println("@echo %" + AuxiliaryServiceHelper.NM_AUX_SERVICE + serviceName+ "%>> "+ processStartFile);
    }
    fileWriter.println("@echo " + cId + ">> "+ processStartFile);
    fileWriter.println("@ping -n 100 127.0.0.1 >nul");
  }
  else {
    fileWriter.write("\numask 0");
    fileWriter.write("\necho $" + Environment.CONTAINER_ID.name() + " > "+ processStartFile);
    fileWriter.write("\necho $" + Environment.NM_HOST.name() + " >> "+ processStartFile);
    fileWriter.write("\necho $" + Environment.NM_PORT.name() + " >> "+ processStartFile);
    fileWriter.write("\necho $" + Environment.NM_HTTP_PORT.name() + " >> "+ processStartFile);
    fileWriter.write("\necho $" + Environment.LOCAL_DIRS.name() + " >> "+ processStartFile);
    fileWriter.write("\necho $" + Environment.USER.name() + " >> "+ processStartFile);
    fileWriter.write("\necho $" + Environment.LOGNAME.name() + " >> "+ processStartFile);
    fileWriter.write("\necho $" + Environment.PWD.name() + " >> "+ processStartFile);
    fileWriter.write("\necho $" + Environment.HOME.name() + " >> "+ processStartFile);
    for ( String serviceName : containerManager.getAuxServiceMetaData().keySet()) {
      fileWriter.write("\necho $" + AuxiliaryServiceHelper.NM_AUX_SERVICE + serviceName+ " >> "+ processStartFile);
    }
    fileWriter.write("\necho $$ >> " + processStartFile);
    fileWriter.write("\nexec sleep 100");
  }
  fileWriter.close();
  // Register the script as an application-visibility local resource and
  // launch the container running it.
  URL resource_alpha=ConverterUtils.getYarnUrlFromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
  LocalResource rsrc_alpha=recordFactory.newRecordInstance(LocalResource.class);
  rsrc_alpha.setResource(resource_alpha);
  rsrc_alpha.setSize(-1);
  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
  rsrc_alpha.setType(LocalResourceType.FILE);
  rsrc_alpha.setTimestamp(scriptFile.lastModified());
  String destinationFile="dest_file";
  Map localResources=new HashMap();
  localResources.put(destinationFile,rsrc_alpha);
  containerLaunchContext.setLocalResources(localResources);
  List commands=Arrays.asList(Shell.getRunScriptCommand(scriptFile));
  containerLaunchContext.setCommands(commands);
  StartContainerRequest scRequest=StartContainerRequest.newInstance(containerLaunchContext,createContainerToken(cId,Priority.newInstance(0),0));
  List list=new ArrayList();
  list.add(scRequest);
  StartContainersRequest allRequests=StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);
  // Poll (up to ~20s) for the script to have started and written its file.
  int timeoutSecs=0;
  while (!processStartFile.exists() && timeoutSecs++ < 20) {
    Thread.sleep(1000);
    LOG.info("Waiting for process start-file to be created");
  }
  Assert.assertTrue("ProcessStartFile doesn't exist!",processStartFile.exists());
  // Compute the expected per-app local dirs and container log dirs.
  List localDirs=dirsHandler.getLocalDirs();
  List logDirs=dirsHandler.getLogDirs();
  List appDirs=new ArrayList(localDirs.size());
  for ( String localDir : localDirs) {
    Path usersdir=new Path(localDir,ContainerLocalizer.USERCACHE);
    Path userdir=new Path(usersdir,user);
    Path appsdir=new Path(userdir,ContainerLocalizer.APPCACHE);
    appDirs.add(new Path(appsdir,appId.toString()));
  }
  List containerLogDirs=new ArrayList();
  String relativeContainerLogDir=ContainerLaunch.getRelativeContainerLogDir(appId.toString(),cId.toString());
  for ( String logDir : logDirs) {
    containerLogDirs.add(logDir + Path.SEPARATOR + relativeContainerLogDir);
  }
  // FIX: the reader was never closed; ensure it is released even when an
  // assertion inside fails.
  String pid=null;
  BufferedReader reader=new BufferedReader(new FileReader(processStartFile));
  try {
    // The file's lines must reflect the NM-provided values, in the exact
    // order the script echoed them.
    Assert.assertEquals(cId.toString(),reader.readLine());
    Assert.assertEquals(context.getNodeId().getHost(),reader.readLine());
    Assert.assertEquals(String.valueOf(context.getNodeId().getPort()),reader.readLine());
    Assert.assertEquals(String.valueOf(HTTP_PORT),reader.readLine());
    Assert.assertEquals(StringUtils.join(",",appDirs),reader.readLine());
    Assert.assertEquals(user,reader.readLine());
    Assert.assertEquals(user,reader.readLine());
    String obtainedPWD=reader.readLine();
    boolean found=false;
    for ( Path localDir : appDirs) {
      if (new Path(localDir,cId.toString()).toString().equals(obtainedPWD)) {
        found=true;
        break;
      }
    }
    Assert.assertTrue("Wrong local-dir found : " + obtainedPWD,found);
    Assert.assertEquals(conf.get(YarnConfiguration.NM_USER_HOME_DIR,YarnConfiguration.DEFAULT_NM_USER_HOME_DIR),reader.readLine());
    for ( String serviceName : containerManager.getAuxServiceMetaData().keySet()) {
      Assert.assertEquals(containerManager.getAuxServiceMetaData().get(serviceName),ByteBuffer.wrap(Base64.decodeBase64(reader.readLine().getBytes())));
    }
    // The sanitized launch context itself must also carry the NM values.
    Assert.assertEquals(cId.toString(),containerLaunchContext.getEnvironment().get(Environment.CONTAINER_ID.name()));
    Assert.assertEquals(context.getNodeId().getHost(),containerLaunchContext.getEnvironment().get(Environment.NM_HOST.name()));
    Assert.assertEquals(String.valueOf(context.getNodeId().getPort()),containerLaunchContext.getEnvironment().get(Environment.NM_PORT.name()));
    Assert.assertEquals(String.valueOf(HTTP_PORT),containerLaunchContext.getEnvironment().get(Environment.NM_HTTP_PORT.name()));
    Assert.assertEquals(StringUtils.join(",",appDirs),containerLaunchContext.getEnvironment().get(Environment.LOCAL_DIRS.name()));
    Assert.assertEquals(StringUtils.join(",",containerLogDirs),containerLaunchContext.getEnvironment().get(Environment.LOG_DIRS.name()));
    Assert.assertEquals(user,containerLaunchContext.getEnvironment().get(Environment.USER.name()));
    Assert.assertEquals(user,containerLaunchContext.getEnvironment().get(Environment.LOGNAME.name()));
    found=false;
    obtainedPWD=containerLaunchContext.getEnvironment().get(Environment.PWD.name());
    for ( Path localDir : appDirs) {
      if (new Path(localDir,cId.toString()).toString().equals(obtainedPWD)) {
        found=true;
        break;
      }
    }
    Assert.assertTrue("Wrong local-dir found : " + obtainedPWD,found);
    Assert.assertEquals(conf.get(YarnConfiguration.NM_USER_HOME_DIR,YarnConfiguration.DEFAULT_NM_USER_HOME_DIR),containerLaunchContext.getEnvironment().get(Environment.HOME.name()));
    // Last line before EOF is the shell's pid ($$).
    pid=reader.readLine().trim();
    Assert.assertEquals(null,reader.readLine());
  }
  finally {
    reader.close();
  }
  // FIX: this liveness assertion was accidentally duplicated; once suffices.
  Assert.assertTrue("Process is not alive!",DefaultContainerExecutor.containerIsAlive(pid));
  // Stop the container and verify it reports KILLED_BY_APPMASTER and that the
  // shell process is actually gone.
  List containerIds=new ArrayList();
  containerIds.add(cId);
  StopContainersRequest stopRequest=StopContainersRequest.newInstance(containerIds);
  containerManager.stopContainers(stopRequest);
  BaseContainerManagerTest.waitForContainerState(containerManager,cId,ContainerState.COMPLETE);
  GetContainerStatusesRequest gcsRequest=GetContainerStatusesRequest.newInstance(containerIds);
  ContainerStatus containerStatus=containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
  int expectedExitCode=ContainerExitStatus.KILLED_BY_APPMASTER;
  Assert.assertEquals(expectedExitCode,containerStatus.getExitStatus());
  Assert.assertFalse("Process is still alive!",DefaultContainerExecutor.containerIsAlive(pid));
}
APIUtilityVerifier IterativeVerifier EqualityVerifier
/**
 * Round-trip check for the directory-number/relative-path mapping: converting
 * every value in [0, 10000) to a relative path and back must recover the
 * original number.
 */
@Test public void testDirectoryConversion(){
  for (int dirNum=0; dirNum < 10000; dirNum++) {
    final String relativePath=Directory.getRelativePath(dirNum);
    Assert.assertEquals("Incorrect conversion for " + dirNum,dirNum,Directory.getDirectoryNumber(relativePath));
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Exercises LocalResourceRequest.compareTo: a request must order strictly
 * before one with a different path or a later timestamp, and must compare
 * unequal when the type or the pattern differs (including a null pattern).
 *
 * @throws URISyntaxException if a resource path is malformed
 */
@Test public void testResourceOrder() throws URISyntaxException {
  // Seed is logged so a failing ordering can be reproduced.
  final Random rand=new Random();
  final long seed=rand.nextLong();
  rand.setSeed(seed);
  System.out.println("SEED: " + seed);
  final long basetime=rand.nextLong() >>> 2;
  final org.apache.hadoop.yarn.api.records.LocalResource yarnRsrcA=getYarnResource(new Path("http://yak.org:80/foobar"),-1,basetime,FILE,PUBLIC,"^/foo/.*");
  final LocalResourceRequest reqA=new LocalResourceRequest(yarnRsrcA);
  // Different path => strictly ordered after reqA.
  org.apache.hadoop.yarn.api.records.LocalResource yarnRsrcB=getYarnResource(new Path("http://yak.org:80/foobaz"),-1,basetime,FILE,PUBLIC,"^/foo/.*");
  LocalResourceRequest reqB=new LocalResourceRequest(yarnRsrcB);
  assertTrue(reqA.compareTo(reqB) < 0);
  // Later timestamp => strictly ordered after reqA.
  yarnRsrcB=getYarnResource(new Path("http://yak.org:80/foobar"),-1,basetime + 1,FILE,PUBLIC,"^/foo/.*");
  reqB=new LocalResourceRequest(yarnRsrcB);
  assertTrue(reqA.compareTo(reqB) < 0);
  // Different type => must not compare equal.
  yarnRsrcB=getYarnResource(new Path("http://yak.org:80/foobar"),-1,basetime,ARCHIVE,PUBLIC,"^/foo/.*");
  reqB=new LocalResourceRequest(yarnRsrcB);
  assertTrue(reqA.compareTo(reqB) != 0);
  // Different pattern => must not compare equal.
  yarnRsrcB=getYarnResource(new Path("http://yak.org:80/foobar"),-1,basetime,ARCHIVE,PUBLIC,"^/food/.*");
  reqB=new LocalResourceRequest(yarnRsrcB);
  assertTrue(reqA.compareTo(reqB) != 0);
  // Null pattern => must not compare equal either.
  yarnRsrcB=getYarnResource(new Path("http://yak.org:80/foobar"),-1,basetime,ARCHIVE,PUBLIC,null);
  reqB=new LocalResourceRequest(yarnRsrcB);
  assertTrue(reqA.compareTo(reqB) != 0);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that a successful localization round-trip is persisted to the NM
 * state store: the store must see startResourceLocalization when a path is
 * assigned, finishResourceLocalization (with matching resource and local
 * path) once the resource is localized, and removeLocalizedResource when the
 * resource is removed from the tracker.
 */
@Test @SuppressWarnings("unchecked") public void testStateStoreSuccessfulLocalization() throws Exception {
final String user="someuser";
final ApplicationId appId=ApplicationId.newInstance(1,1);
final Path localDir=new Path("/tmp");
Configuration conf=new YarnConfiguration();
DrainDispatcher dispatcher=null;
dispatcher=createDispatcher(conf);
// Mocked sinks for localizer/container events; only the state store calls
// are verified in this test.
EventHandler localizerEventHandler=mock(EventHandler.class);
EventHandler containerEventHandler=mock(EventHandler.class);
dispatcher.register(LocalizerEventType.class,localizerEventHandler);
dispatcher.register(ContainerEventType.class,containerEventHandler);
DeletionService mockDelService=mock(DeletionService.class);
NMStateStoreService stateStore=mock(NMStateStoreService.class);
try {
LocalResourcesTracker tracker=new LocalResourcesTrackerImpl(user,appId,dispatcher,false,conf,stateStore);
ContainerId cId1=BuilderUtils.newContainerId(1,1,1,1);
LocalResourceRequest lr1=createLocalResourceRequest(user,1,1,LocalResourceVisibility.APPLICATION);
LocalizerContext lc1=new LocalizerContext(user,cId1,null);
// Request the resource, then ask the tracker for its localization path;
// this should trigger startResourceLocalization on the store.
ResourceEvent reqEvent1=new ResourceRequestEvent(lr1,LocalResourceVisibility.APPLICATION,lc1);
tracker.handle(reqEvent1);
dispatcher.await();
Path hierarchicalPath1=tracker.getPathForLocalization(lr1,localDir);
ArgumentCaptor localResourceCaptor=ArgumentCaptor.forClass(LocalResourceProto.class);
ArgumentCaptor pathCaptor=ArgumentCaptor.forClass(Path.class);
verify(stateStore).startResourceLocalization(eq(user),eq(appId),localResourceCaptor.capture(),pathCaptor.capture());
LocalResourceProto lrProto=localResourceCaptor.getValue();
Path localizedPath1=pathCaptor.getValue();
// The persisted proto must round-trip back to the original request, and the
// stored path must sit directly under the hierarchical directory.
Assert.assertEquals(lr1,new LocalResourceRequest(new LocalResourcePBImpl(lrProto)));
Assert.assertEquals(hierarchicalPath1,localizedPath1.getParent());
// Mark the resource localized (size 120) and expect
// finishResourceLocalization with the matching resource and local path.
ResourceLocalizedEvent rle1=new ResourceLocalizedEvent(lr1,pathCaptor.getValue(),120);
tracker.handle(rle1);
dispatcher.await();
ArgumentCaptor localizedProtoCaptor=ArgumentCaptor.forClass(LocalizedResourceProto.class);
verify(stateStore).finishResourceLocalization(eq(user),eq(appId),localizedProtoCaptor.capture());
LocalizedResourceProto localizedProto=localizedProtoCaptor.getValue();
Assert.assertEquals(lr1,new LocalResourceRequest(new LocalResourcePBImpl(localizedProto.getResource())));
Assert.assertEquals(localizedPath1.toString(),localizedProto.getLocalPath());
LocalizedResource localizedRsrc1=tracker.getLocalizedResource(lr1);
Assert.assertNotNull(localizedRsrc1);
// Release the container's reference, remove the resource, and expect the
// store to be told to forget it.
tracker.handle(new ResourceReleaseEvent(lr1,cId1));
dispatcher.await();
boolean removeResult=tracker.remove(localizedRsrc1,mockDelService);
Assert.assertTrue(removeResult);
verify(stateStore).removeLocalizedResource(eq(user),eq(appId),eq(localizedPath1));
}
finally {
if (dispatcher != null) {
dispatcher.stop();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Basic reference-counting behavior of LocalResourcesTrackerImpl: requests
 * from containers bump a resource's ref count, releases drop it, a resource
 * with outstanding references cannot be removed, and a fully released
 * LOCALIZED resource can.
 */
@Test(timeout=10000) @SuppressWarnings("unchecked") public void test(){
String user="testuser";
DrainDispatcher dispatcher=null;
try {
Configuration conf=new Configuration();
dispatcher=createDispatcher(conf);
EventHandler localizerEventHandler=mock(EventHandler.class);
EventHandler containerEventHandler=mock(EventHandler.class);
dispatcher.register(LocalizerEventType.class,localizerEventHandler);
dispatcher.register(ContainerEventType.class,containerEventHandler);
DeletionService mockDelService=mock(DeletionService.class);
// Two containers and two PUBLIC resources, pre-seeded into the tracker's
// backing map.
ContainerId cId1=BuilderUtils.newContainerId(1,1,1,1);
LocalizerContext lc1=new LocalizerContext(user,cId1,null);
ContainerId cId2=BuilderUtils.newContainerId(1,1,1,2);
LocalizerContext lc2=new LocalizerContext(user,cId2,null);
LocalResourceRequest req1=createLocalResourceRequest(user,1,1,LocalResourceVisibility.PUBLIC);
LocalResourceRequest req2=createLocalResourceRequest(user,2,1,LocalResourceVisibility.PUBLIC);
LocalizedResource lr1=createLocalizedResource(req1,dispatcher);
LocalizedResource lr2=createLocalizedResource(req2,dispatcher);
ConcurrentMap localrsrc=new ConcurrentHashMap();
localrsrc.put(req1,lr1);
localrsrc.put(req2,lr2);
LocalResourcesTracker tracker=new LocalResourcesTrackerImpl(user,null,dispatcher,localrsrc,false,conf,new NMNullStateStoreService());
// req1 is requested by both containers, req2 only by container 1.
ResourceEvent req11Event=new ResourceRequestEvent(req1,LocalResourceVisibility.PUBLIC,lc1);
ResourceEvent req12Event=new ResourceRequestEvent(req1,LocalResourceVisibility.PUBLIC,lc2);
ResourceEvent req21Event=new ResourceRequestEvent(req2,LocalResourceVisibility.PUBLIC,lc1);
ResourceEvent rel11Event=new ResourceReleaseEvent(req1,cId1);
ResourceEvent rel12Event=new ResourceReleaseEvent(req1,cId2);
ResourceEvent rel21Event=new ResourceReleaseEvent(req2,cId1);
tracker.handle(req11Event);
tracker.handle(req12Event);
tracker.handle(req21Event);
dispatcher.await();
// Each request should have produced one localizer event; ref counts follow
// the request pattern above.
verify(localizerEventHandler,times(3)).handle(any(LocalizerResourceRequestEvent.class));
Assert.assertEquals(2,lr1.getRefCount());
Assert.assertEquals(1,lr2.getRefCount());
// Releasing req2 leaves both resources tracked (release != removal).
tracker.handle(rel21Event);
dispatcher.await();
verifyTrackedResourceCount(tracker,2);
// lr1 still has 2 refs, so remove() must refuse and keep it tracked.
Assert.assertEquals(2,lr1.getRefCount());
Assert.assertFalse(tracker.remove(lr1,mockDelService));
verifyTrackedResourceCount(tracker,2);
// Drive lr1 to LOCALIZED, release both refs, then removal must succeed.
ResourceLocalizedEvent rle=new ResourceLocalizedEvent(req1,new Path("file:///tmp/r1"),1);
lr1.handle(rle);
Assert.assertTrue(lr1.getState().equals(ResourceState.LOCALIZED));
tracker.handle(rel11Event);
tracker.handle(rel12Event);
Assert.assertEquals(0,lr1.getRefCount());
Assert.assertTrue(tracker.remove(lr1,mockDelService));
verifyTrackedResourceCount(tracker,1);
}
finally {
if (dispatcher != null) {
dispatcher.stop();
}
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that resources recovered after an NM restart are re-registered
 * with the LocalCacheDirectoryManager so per-directory file counts reflect
 * the recovered paths (here: two under "4/2", one under "4/3", one under
 * "4", none at the cache root).
 */
@Test @SuppressWarnings("unchecked") public void testRecoveredResourceWithDirCacheMgr() throws Exception {
final String user="someuser";
final ApplicationId appId=ApplicationId.newInstance(1,1);
final Path localDirRoot=new Path("/tmp/localdir");
Configuration conf=new YarnConfiguration();
DrainDispatcher dispatcher=null;
dispatcher=createDispatcher(conf);
EventHandler localizerEventHandler=mock(EventHandler.class);
EventHandler containerEventHandler=mock(EventHandler.class);
dispatcher.register(LocalizerEventType.class,localizerEventHandler);
dispatcher.register(ContainerEventType.class,containerEventHandler);
NMStateStoreService stateStore=mock(NMStateStoreService.class);
try {
// Tracker created with directory-cache management enabled (4th arg true).
LocalResourcesTrackerImpl tracker=new LocalResourcesTrackerImpl(user,appId,dispatcher,true,conf,stateStore);
// Recover lr1 at <root>/4/2/52/resource.jar; it becomes known to the
// tracker and counts against directory "4/2".
LocalResourceRequest lr1=createLocalResourceRequest(user,1,1,LocalResourceVisibility.PUBLIC);
Assert.assertNull(tracker.getLocalizedResource(lr1));
final long localizedId1=52;
Path hierarchicalPath1=new Path(localDirRoot + "/4/2",Long.toString(localizedId1));
Path localizedPath1=new Path(hierarchicalPath1,"resource.jar");
tracker.handle(new ResourceRecoveredEvent(lr1,localizedPath1,120));
dispatcher.await();
Assert.assertNotNull(tracker.getLocalizedResource(lr1));
LocalCacheDirectoryManager dirMgrRoot=tracker.getDirectoryManager(localDirRoot);
Assert.assertEquals(0,dirMgrRoot.getDirectory("").getCount());
Assert.assertEquals(1,dirMgrRoot.getDirectory("4/2").getCount());
// Recover lr2 in the same "4/2" subdirectory; its count becomes 2.
LocalResourceRequest lr2=createLocalResourceRequest(user,2,2,LocalResourceVisibility.PUBLIC);
Assert.assertNull(tracker.getLocalizedResource(lr2));
final long localizedId2=localizedId1 + 1;
Path hierarchicalPath2=new Path(localDirRoot + "/4/2",Long.toString(localizedId2));
Path localizedPath2=new Path(hierarchicalPath2,"resource.jar");
tracker.handle(new ResourceRecoveredEvent(lr2,localizedPath2,120));
dispatcher.await();
Assert.assertNotNull(tracker.getLocalizedResource(lr2));
Assert.assertEquals(0,dirMgrRoot.getDirectory("").getCount());
Assert.assertEquals(2,dirMgrRoot.getDirectory("4/2").getCount());
// Recover lr3 under sibling directory "4/3"; counts stay independent.
LocalResourceRequest lr3=createLocalResourceRequest(user,3,3,LocalResourceVisibility.PUBLIC);
Assert.assertNull(tracker.getLocalizedResource(lr3));
final long localizedId3=128;
Path hierarchicalPath3=new Path(localDirRoot + "/4/3",Long.toString(localizedId3));
Path localizedPath3=new Path(hierarchicalPath3,"resource.jar");
tracker.handle(new ResourceRecoveredEvent(lr3,localizedPath3,120));
dispatcher.await();
Assert.assertNotNull(tracker.getLocalizedResource(lr3));
Assert.assertEquals(0,dirMgrRoot.getDirectory("").getCount());
Assert.assertEquals(2,dirMgrRoot.getDirectory("4/2").getCount());
Assert.assertEquals(1,dirMgrRoot.getDirectory("4/3").getCount());
// Recover lr4 directly under "4" (a non-leaf level); only "4"'s own count
// increments, the child directories are untouched.
LocalResourceRequest lr4=createLocalResourceRequest(user,4,4,LocalResourceVisibility.PUBLIC);
Assert.assertNull(tracker.getLocalizedResource(lr4));
final long localizedId4=256;
Path hierarchicalPath4=new Path(localDirRoot + "/4",Long.toString(localizedId4));
Path localizedPath4=new Path(hierarchicalPath4,"resource.jar");
tracker.handle(new ResourceRecoveredEvent(lr4,localizedPath4,120));
dispatcher.await();
Assert.assertNotNull(tracker.getLocalizedResource(lr4));
Assert.assertEquals(0,dirMgrRoot.getDirectory("").getCount());
Assert.assertEquals(1,dirMgrRoot.getDirectory("4").getCount());
Assert.assertEquals(2,dirMgrRoot.getDirectory("4/2").getCount());
Assert.assertEquals(1,dirMgrRoot.getDirectory("4/3").getCount());
}
finally {
if (dispatcher != null) {
dispatcher.stop();
}
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that after recovering a resource whose localized id is N, the next
 * path handed out by the tracker for a fresh localization uses id N+1 — i.e.
 * recovery advances the tracker's unique-id counter past recovered entries.
 *
 * @throws Exception on dispatcher or tracker failure
 */
@Test @SuppressWarnings("unchecked") public void testRecoveredResource() throws Exception {
  final String user="someuser";
  final ApplicationId appId=ApplicationId.newInstance(1,1);
  final Path localDir=new Path("/tmp/localdir");
  Configuration conf=new YarnConfiguration();
  DrainDispatcher dispatcher=null;
  dispatcher=createDispatcher(conf);
  // Register mocked event sinks; their traffic is not asserted here.
  EventHandler localizerHandler=mock(EventHandler.class);
  EventHandler containerHandler=mock(EventHandler.class);
  dispatcher.register(LocalizerEventType.class,localizerHandler);
  dispatcher.register(ContainerEventType.class,containerHandler);
  NMStateStoreService stateStore=mock(NMStateStoreService.class);
  try {
    LocalResourcesTracker tracker=new LocalResourcesTrackerImpl(user,appId,dispatcher,false,conf,stateStore);
    ContainerId containerId=BuilderUtils.newContainerId(1,1,1,1);
    // Recover a resource previously localized at <localDir>/52/resource.jar.
    LocalResourceRequest recoveredReq=createLocalResourceRequest(user,1,1,LocalResourceVisibility.APPLICATION);
    Assert.assertNull(tracker.getLocalizedResource(recoveredReq));
    final long recoveredId=52;
    Path recoveredDir=new Path(localDir,Long.toString(recoveredId));
    Path recoveredFile=new Path(recoveredDir,"resource.jar");
    tracker.handle(new ResourceRecoveredEvent(recoveredReq,recoveredFile,120));
    dispatcher.await();
    Assert.assertNotNull(tracker.getLocalizedResource(recoveredReq));
    // Now request a brand-new resource and grab its localization path.
    LocalResourceRequest freshReq=createLocalResourceRequest(user,2,2,LocalResourceVisibility.APPLICATION);
    LocalizerContext ctx=new LocalizerContext(user,containerId,null);
    ResourceEvent freshReqEvent=new ResourceRequestEvent(freshReq,LocalResourceVisibility.APPLICATION,ctx);
    tracker.handle(freshReqEvent);
    dispatcher.await();
    Path freshDir=tracker.getPathForLocalization(freshReq,localDir);
    // The directory name encodes the id; it must be exactly one past the
    // recovered id.
    long freshId=Long.parseLong(freshDir.getName());
    Assert.assertEquals(recoveredId + 1,freshId);
  }
  finally {
    if (dispatcher != null) {
      dispatcher.stop();
    }
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Lifecycle of a cached resource through failure and re-request: a failed
 * localization notifies every referencing container and evicts the entry;
 * a subsequent request re-creates the entry fresh; stale releases from
 * containers that referenced the failed copy do not disturb the new entry;
 * successful localization then notifies only the current referent.
 */
@Test(timeout=1000) @SuppressWarnings("unchecked") public void testLocalResourceCache(){
String user="testuser";
DrainDispatcher dispatcher=null;
try {
Configuration conf=new Configuration();
dispatcher=createDispatcher(conf);
EventHandler localizerEventHandler=mock(EventHandler.class);
EventHandler containerEventHandler=mock(EventHandler.class);
dispatcher.register(LocalizerEventType.class,localizerEventHandler);
dispatcher.register(ContainerEventType.class,containerEventHandler);
// The tracker shares this map, so assertions can inspect tracker state
// directly through it.
ConcurrentMap localrsrc=new ConcurrentHashMap();
LocalResourcesTracker tracker=new LocalResourcesTrackerImpl(user,null,dispatcher,localrsrc,true,conf,new NMNullStateStoreService());
LocalResourceRequest lr=createLocalResourceRequest(user,1,1,LocalResourceVisibility.PUBLIC);
// First request: entry appears, DOWNLOADING, ref-counted by container 1.
ContainerId cId1=BuilderUtils.newContainerId(1,1,1,1);
LocalizerContext lc1=new LocalizerContext(user,cId1,null);
ResourceEvent reqEvent1=new ResourceRequestEvent(lr,LocalResourceVisibility.PRIVATE,lc1);
Assert.assertEquals(0,localrsrc.size());
tracker.handle(reqEvent1);
dispatcher.await();
Assert.assertEquals(1,localrsrc.size());
Assert.assertTrue(localrsrc.containsKey(lr));
Assert.assertEquals(1,localrsrc.get(lr).getRefCount());
Assert.assertTrue(localrsrc.get(lr).ref.contains(cId1));
Assert.assertEquals(ResourceState.DOWNLOADING,localrsrc.get(lr).getState());
// Second container requests the same resource: ref count becomes 2.
ContainerId cId2=BuilderUtils.newContainerId(1,1,1,2);
LocalizerContext lc2=new LocalizerContext(user,cId2,null);
ResourceEvent reqEvent2=new ResourceRequestEvent(lr,LocalResourceVisibility.PRIVATE,lc2);
tracker.handle(reqEvent2);
dispatcher.await();
Assert.assertEquals(2,localrsrc.get(lr).getRefCount());
Assert.assertTrue(localrsrc.get(lr).ref.contains(cId2));
// Localization failure: entry is evicted from the map, both referencing
// containers get a ContainerResourceFailedEvent, resource ends up FAILED.
ResourceEvent resourceFailedEvent=new ResourceFailedLocalizationEvent(lr,(new Exception("test").getMessage()));
LocalizedResource localizedResource=localrsrc.get(lr);
tracker.handle(resourceFailedEvent);
dispatcher.await();
Assert.assertEquals(0,localrsrc.size());
verify(containerEventHandler,times(2)).handle(isA(ContainerResourceFailedEvent.class));
Assert.assertEquals(ResourceState.FAILED,localizedResource.getState());
// Release from container 1 arrives after the eviction; must be harmless.
ResourceReleaseEvent relEvent1=new ResourceReleaseEvent(lr,cId1);
tracker.handle(relEvent1);
dispatcher.await();
// Container 3 re-requests: a fresh entry is created with a single ref.
ContainerId cId3=BuilderUtils.newContainerId(1,1,1,3);
LocalizerContext lc3=new LocalizerContext(user,cId3,null);
ResourceEvent reqEvent3=new ResourceRequestEvent(lr,LocalResourceVisibility.PRIVATE,lc3);
tracker.handle(reqEvent3);
dispatcher.await();
Assert.assertEquals(1,localrsrc.size());
Assert.assertTrue(localrsrc.containsKey(lr));
Assert.assertEquals(1,localrsrc.get(lr).getRefCount());
Assert.assertTrue(localrsrc.get(lr).ref.contains(cId3));
// Stale release from container 2 must not affect the new entry either.
ResourceReleaseEvent relEvent2=new ResourceReleaseEvent(lr,cId2);
tracker.handle(relEvent2);
dispatcher.await();
Assert.assertEquals(1,localrsrc.size());
Assert.assertTrue(localrsrc.containsKey(lr));
Assert.assertEquals(1,localrsrc.get(lr).getRefCount());
Assert.assertTrue(localrsrc.get(lr).ref.contains(cId3));
// Successful localization: exactly one localized notification, state
// LOCALIZED, ref count unchanged.
Path localizedPath=new Path("/tmp/file1");
ResourceLocalizedEvent localizedEvent=new ResourceLocalizedEvent(lr,localizedPath,123L);
tracker.handle(localizedEvent);
dispatcher.await();
verify(containerEventHandler,times(1)).handle(isA(ContainerResourceLocalizedEvent.class));
Assert.assertEquals(ResourceState.LOCALIZED,localrsrc.get(lr).getState());
Assert.assertEquals(1,localrsrc.get(lr).getRefCount());
// Final release drops the last reference.
ResourceReleaseEvent relEvent3=new ResourceReleaseEvent(lr,cId3);
tracker.handle(relEvent3);
dispatcher.await();
Assert.assertEquals(0,localrsrc.get(lr).getRefCount());
}
finally {
if (dispatcher != null) {
dispatcher.stop();
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies state-store bookkeeping for a FAILED localization: the store must
 * record startResourceLocalization when a path is assigned, and must be told
 * to remove the localized resource once localization fails.
 *
 * @throws Exception on dispatcher or tracker failure
 */
@Test @SuppressWarnings("unchecked") public void testStateStoreFailedLocalization() throws Exception {
  final String user="someuser";
  final ApplicationId appId=ApplicationId.newInstance(1,1);
  final Path localDir=new Path("/tmp");
  Configuration conf=new YarnConfiguration();
  DrainDispatcher dispatcher=null;
  dispatcher=createDispatcher(conf);
  EventHandler localizerHandler=mock(EventHandler.class);
  EventHandler containerHandler=mock(EventHandler.class);
  dispatcher.register(LocalizerEventType.class,localizerHandler);
  dispatcher.register(ContainerEventType.class,containerHandler);
  NMStateStoreService stateStore=mock(NMStateStoreService.class);
  try {
    LocalResourcesTracker tracker=new LocalResourcesTrackerImpl(user,appId,dispatcher,false,conf,stateStore);
    ContainerId containerId=BuilderUtils.newContainerId(1,1,1,1);
    LocalResourceRequest request=createLocalResourceRequest(user,1,1,LocalResourceVisibility.APPLICATION);
    LocalizerContext ctx=new LocalizerContext(user,containerId,null);
    // Request the resource and obtain its localization path; this should
    // drive startResourceLocalization on the state store.
    ResourceEvent requestEvent=new ResourceRequestEvent(request,LocalResourceVisibility.APPLICATION,ctx);
    tracker.handle(requestEvent);
    dispatcher.await();
    Path assignedDir=tracker.getPathForLocalization(request,localDir);
    ArgumentCaptor protoCaptor=ArgumentCaptor.forClass(LocalResourceProto.class);
    ArgumentCaptor pathCaptor=ArgumentCaptor.forClass(Path.class);
    verify(stateStore).startResourceLocalization(eq(user),eq(appId),protoCaptor.capture(),pathCaptor.capture());
    LocalResourceProto storedProto=protoCaptor.getValue();
    Path storedPath=pathCaptor.getValue();
    // The persisted proto must round-trip to the original request, and the
    // stored file path must sit directly under the assigned directory.
    Assert.assertEquals(request,new LocalResourceRequest(new LocalResourcePBImpl(storedProto)));
    Assert.assertEquals(assignedDir,storedPath.getParent());
    // Fail the localization; the store must be told to drop the entry.
    ResourceFailedLocalizationEvent failureEvent=new ResourceFailedLocalizationEvent(request,new Exception("Test").toString());
    tracker.handle(failureEvent);
    dispatcher.await();
    verify(stateStore).removeLocalizedResource(eq(user),eq(appId),eq(storedPath));
  }
  finally {
    if (dispatcher != null) {
      dispatcher.stop();
    }
  }
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Hierarchical local-cache directory assignment: with the per-directory file
 * limit lowered to 37, successive localizations should land in distinct
 * hierarchical paths, failed localizations should free their slot, and
 * resources with zero references should be removable from the tracker.
 */
@Test(timeout=100000) @SuppressWarnings("unchecked") public void testHierarchicalLocalCacheDirectories(){
String user="testuser";
DrainDispatcher dispatcher=null;
try {
Configuration conf=new Configuration();
// Lower the per-directory cap so hierarchical sub-directories are exercised.
conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY,"37");
dispatcher=createDispatcher(conf);
EventHandler localizerEventHandler=mock(EventHandler.class);
EventHandler containerEventHandler=mock(EventHandler.class);
dispatcher.register(LocalizerEventType.class,localizerEventHandler);
dispatcher.register(ContainerEventType.class,containerEventHandler);
DeletionService mockDelService=mock(DeletionService.class);
ConcurrentMap localrsrc=new ConcurrentHashMap();
LocalResourcesTracker tracker=new LocalResourcesTrackerImpl(user,null,dispatcher,localrsrc,true,conf,new NMNullStateStoreService());
Path localDir=new Path("/tmp");
ContainerId cId1=BuilderUtils.newContainerId(1,1,1,1);
// lr1: request, obtain its directory, and mark it localized there.
LocalResourceRequest lr1=createLocalResourceRequest(user,1,1,LocalResourceVisibility.PUBLIC);
LocalizerContext lc1=new LocalizerContext(user,cId1,null);
ResourceEvent reqEvent1=new ResourceRequestEvent(lr1,LocalResourceVisibility.PUBLIC,lc1);
tracker.handle(reqEvent1);
Path hierarchicalPath1=tracker.getPathForLocalization(lr1,localDir).getParent();
ResourceLocalizedEvent rle1=new ResourceLocalizedEvent(lr1,new Path(hierarchicalPath1.toUri().toString() + Path.SEPARATOR + "file1"),120);
tracker.handle(rle1);
// lr2: request a second resource but fail its localization.
LocalResourceRequest lr2=createLocalResourceRequest(user,3,3,LocalResourceVisibility.PUBLIC);
ResourceEvent reqEvent2=new ResourceRequestEvent(lr2,LocalResourceVisibility.PUBLIC,lc1);
tracker.handle(reqEvent2);
Path hierarchicalPath2=tracker.getPathForLocalization(lr2,localDir).getParent();
ResourceFailedLocalizationEvent rfe2=new ResourceFailedLocalizationEvent(lr2,new Exception("Test").toString());
tracker.handle(rfe2);
// Two concurrent localizations must not share a directory instance.
Assert.assertNotSame(hierarchicalPath1,hierarchicalPath2);
// lr3: third resource localizes successfully.
LocalResourceRequest lr3=createLocalResourceRequest(user,2,2,LocalResourceVisibility.PUBLIC);
ResourceEvent reqEvent3=new ResourceRequestEvent(lr3,LocalResourceVisibility.PUBLIC,lc1);
tracker.handle(reqEvent3);
Path hierarchicalPath3=tracker.getPathForLocalization(lr3,localDir).getParent();
ResourceLocalizedEvent rle3=new ResourceLocalizedEvent(lr3,new Path(hierarchicalPath3.toUri().toString() + Path.SEPARATOR + "file3"),120);
tracker.handle(rle3);
// lr3's directory is expected to be the "0" child of lr1's directory —
// the first hierarchical sub-directory under it.
Assert.assertEquals(hierarchicalPath3.toUri().toString(),hierarchicalPath1.toUri().toString() + Path.SEPARATOR + "0");
// Release lr1's only reference so exactly one tracked resource has
// refcount 0 (lr2 failed, so only lr1 and lr3 remain tracked).
ResourceEvent relEvent1=new ResourceReleaseEvent(lr1,cId1);
tracker.handle(relEvent1);
int resources=0;
Iterator iter=tracker.iterator();
while (iter.hasNext()) {
iter.next();
resources++;
}
Assert.assertEquals(2,resources);
// Remove every zero-ref resource; exactly one (lr1) should qualify.
iter=tracker.iterator();
while (iter.hasNext()) {
LocalizedResource rsrc=iter.next();
if (rsrc.getRefCount() == 0) {
Assert.assertTrue(tracker.remove(rsrc,mockDelService));
resources--;
}
}
Assert.assertEquals(1,resources);
}
finally {
if (dispatcher != null) {
dispatcher.stop();
}
}
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies tracker/filesystem consistency: when a LOCALIZED resource's
 * backing file is deleted out from under the tracker and the resource is
 * re-requested, the tracker must hand out a NEW LocalizedResource instance
 * rather than reusing the stale one.
 */
@Test(timeout=10000) @SuppressWarnings("unchecked") public void testConsistency(){
  String user="testuser";
  DrainDispatcher dispatcher=null;
  try {
    Configuration conf=new Configuration();
    dispatcher=createDispatcher(conf);
    EventHandler localizerEventHandler=mock(EventHandler.class);
    EventHandler containerEventHandler=mock(EventHandler.class);
    dispatcher.register(LocalizerEventType.class,localizerEventHandler);
    dispatcher.register(ContainerEventType.class,containerEventHandler);
    ContainerId cId1=BuilderUtils.newContainerId(1,1,1,1);
    LocalizerContext lc1=new LocalizerContext(user,cId1,null);
    LocalResourceRequest req1=createLocalResourceRequest(user,1,1,LocalResourceVisibility.PUBLIC);
    LocalizedResource lr1=createLocalizedResource(req1,dispatcher);
    // Pre-seed the tracker's backing map with the resource.
    ConcurrentMap localrsrc=new ConcurrentHashMap();
    localrsrc.put(req1,lr1);
    LocalResourcesTracker tracker=new LocalResourcesTrackerImpl(user,null,dispatcher,localrsrc,false,conf,new NMNullStateStoreService());
    ResourceEvent req11Event=new ResourceRequestEvent(req1,LocalResourceVisibility.PUBLIC,lc1);
    ResourceEvent rel11Event=new ResourceReleaseEvent(req1,cId1);
    tracker.handle(req11Event);
    dispatcher.await();
    Assert.assertEquals(1,lr1.getRefCount());
    dispatcher.await();
    verifyTrackedResourceCount(tracker,1);
    // Mark the resource localized at file:///tmp/r1 and create that file on
    // disk so the tracker's view and the filesystem agree.
    ResourceLocalizedEvent rle=new ResourceLocalizedEvent(req1,new Path("file:///tmp/r1"),1);
    lr1.handle(rle);
    Assert.assertTrue(lr1.getState().equals(ResourceState.LOCALIZED));
    Assert.assertTrue(createdummylocalizefile(new Path("file:///tmp/r1")));
    LocalizedResource rsrcbefore=tracker.iterator().next();
    // getRawPath() already returns a String; the redundant toString() the
    // original carried has been dropped.
    File resFile=new File(lr1.getLocalPath().toUri().getRawPath());
    Assert.assertTrue(resFile.exists());
    // Delete the backing file to create the inconsistency under test.
    Assert.assertTrue(resFile.delete());
    // Re-request: the tracker should detect the missing file and replace the
    // stale entry with a fresh LocalizedResource instance.
    tracker.handle(req11Event);
    dispatcher.await();
    lr1.handle(rle);
    Assert.assertTrue(lr1.getState().equals(ResourceState.LOCALIZED));
    LocalizedResource rsrcafter=tracker.iterator().next();
    // FIX: replaced the hand-rolled "if (a == b) fail(...)" with the JUnit
    // identity assertion it re-implemented.
    Assert.assertNotSame("Localized resource should not be equal",rsrcbefore,rsrcafter);
    tracker.handle(rel11Event);
  }
  finally {
    if (dispatcher != null) {
      dispatcher.stop();
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test @SuppressWarnings("unchecked") public void testRecovery() throws Exception {
final String user1="user1";
final String user2="user2";
final ApplicationId appId1=ApplicationId.newInstance(1,1);
final ApplicationId appId2=ApplicationId.newInstance(1,2);
List localDirs=new ArrayList();
String[] sDirs=new String[4];
for (int i=0; i < 4; ++i) {
localDirs.add(lfs.makeQualified(new Path(basedir,i + "")));
sDirs[i]=localDirs.get(i).toString();
}
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs);
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true);
NMMemoryStateStoreService stateStore=new NMMemoryStateStoreService();
stateStore.init(conf);
stateStore.start();
DrainDispatcher dispatcher=new DrainDispatcher();
dispatcher.init(conf);
dispatcher.start();
EventHandler applicationBus=mock(EventHandler.class);
dispatcher.register(ApplicationEventType.class,applicationBus);
EventHandler containerBus=mock(EventHandler.class);
dispatcher.register(ContainerEventType.class,containerBus);
EventHandler localizerBus=mock(EventHandler.class);
dispatcher.register(LocalizerEventType.class,localizerBus);
LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService();
dirsHandler.init(conf);
ResourceLocalizationService spyService=createSpyService(dispatcher,dirsHandler,stateStore);
try {
spyService.init(conf);
spyService.start();
final Application app1=mock(Application.class);
when(app1.getUser()).thenReturn(user1);
when(app1.getAppId()).thenReturn(appId1);
final Application app2=mock(Application.class);
when(app2.getUser()).thenReturn(user2);
when(app2.getAppId()).thenReturn(appId2);
spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app1));
spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app2));
dispatcher.await();
LocalResourcesTracker appTracker1=spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION,user1,appId1);
LocalResourcesTracker privTracker1=spyService.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE,user1,null);
LocalResourcesTracker appTracker2=spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION,user2,appId2);
LocalResourcesTracker pubTracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,null,null);
final Container c1=getMockContainer(appId1,1,user1);
final Container c2=getMockContainer(appId2,2,user2);
Random r=new Random();
long seed=r.nextLong();
System.out.println("SEED: " + seed);
r.setSeed(seed);
final LocalResource privResource1=getPrivateMockedResource(r);
final LocalResourceRequest privReq1=new LocalResourceRequest(privResource1);
final LocalResource privResource2=getPrivateMockedResource(r);
final LocalResourceRequest privReq2=new LocalResourceRequest(privResource2);
final LocalResource pubResource1=getPublicMockedResource(r);
final LocalResourceRequest pubReq1=new LocalResourceRequest(pubResource1);
final LocalResource pubResource2=getPublicMockedResource(r);
final LocalResourceRequest pubReq2=new LocalResourceRequest(pubResource2);
final LocalResource appResource1=getAppMockedResource(r);
final LocalResourceRequest appReq1=new LocalResourceRequest(appResource1);
final LocalResource appResource2=getAppMockedResource(r);
final LocalResourceRequest appReq2=new LocalResourceRequest(appResource2);
final LocalResource appResource3=getAppMockedResource(r);
final LocalResourceRequest appReq3=new LocalResourceRequest(appResource3);
Map> req1=new HashMap>();
req1.put(LocalResourceVisibility.PRIVATE,Arrays.asList(new LocalResourceRequest[]{privReq1,privReq2}));
req1.put(LocalResourceVisibility.PUBLIC,Collections.singletonList(pubReq1));
req1.put(LocalResourceVisibility.APPLICATION,Collections.singletonList(appReq1));
Map> req2=new HashMap>();
req2.put(LocalResourceVisibility.APPLICATION,Arrays.asList(new LocalResourceRequest[]{appReq2,appReq3}));
req2.put(LocalResourceVisibility.PUBLIC,Collections.singletonList(pubReq2));
spyService.handle(new ContainerLocalizationRequestEvent(c1,req1));
spyService.handle(new ContainerLocalizationRequestEvent(c2,req2));
dispatcher.await();
privTracker1.getPathForLocalization(privReq1,dirsHandler.getLocalPathForWrite(ContainerLocalizer.USERCACHE + user1));
privTracker1.getPathForLocalization(privReq2,dirsHandler.getLocalPathForWrite(ContainerLocalizer.USERCACHE + user1));
LocalizedResource privLr1=privTracker1.getLocalizedResource(privReq1);
LocalizedResource privLr2=privTracker1.getLocalizedResource(privReq2);
appTracker1.getPathForLocalization(appReq1,dirsHandler.getLocalPathForWrite(ContainerLocalizer.APPCACHE + appId1));
LocalizedResource appLr1=appTracker1.getLocalizedResource(appReq1);
appTracker2.getPathForLocalization(appReq2,dirsHandler.getLocalPathForWrite(ContainerLocalizer.APPCACHE + appId2));
LocalizedResource appLr2=appTracker2.getLocalizedResource(appReq2);
appTracker2.getPathForLocalization(appReq3,dirsHandler.getLocalPathForWrite(ContainerLocalizer.APPCACHE + appId2));
LocalizedResource appLr3=appTracker2.getLocalizedResource(appReq3);
pubTracker.getPathForLocalization(pubReq1,dirsHandler.getLocalPathForWrite(ContainerLocalizer.FILECACHE));
LocalizedResource pubLr1=pubTracker.getLocalizedResource(pubReq1);
pubTracker.getPathForLocalization(pubReq2,dirsHandler.getLocalPathForWrite(ContainerLocalizer.FILECACHE));
LocalizedResource pubLr2=pubTracker.getLocalizedResource(pubReq2);
assertNotNull("Localization not started",privLr1.getLocalPath());
privTracker1.handle(new ResourceLocalizedEvent(privReq1,privLr1.getLocalPath(),privLr1.getSize() + 5));
assertNotNull("Localization not started",privLr2.getLocalPath());
privTracker1.handle(new ResourceLocalizedEvent(privReq2,privLr2.getLocalPath(),privLr2.getSize() + 10));
assertNotNull("Localization not started",appLr1.getLocalPath());
appTracker1.handle(new ResourceLocalizedEvent(appReq1,appLr1.getLocalPath(),appLr1.getSize()));
assertNotNull("Localization not started",appLr3.getLocalPath());
appTracker2.handle(new ResourceLocalizedEvent(appReq3,appLr3.getLocalPath(),appLr3.getSize() + 7));
assertNotNull("Localization not started",pubLr1.getLocalPath());
pubTracker.handle(new ResourceLocalizedEvent(pubReq1,pubLr1.getLocalPath(),pubLr1.getSize() + 1000));
assertNotNull("Localization not started",pubLr2.getLocalPath());
pubTracker.handle(new ResourceLocalizedEvent(pubReq2,pubLr2.getLocalPath(),pubLr2.getSize() + 99999));
dispatcher.await();
assertEquals(ResourceState.LOCALIZED,privLr1.getState());
assertEquals(ResourceState.LOCALIZED,privLr2.getState());
assertEquals(ResourceState.LOCALIZED,appLr1.getState());
assertEquals(ResourceState.DOWNLOADING,appLr2.getState());
assertEquals(ResourceState.LOCALIZED,appLr3.getState());
assertEquals(ResourceState.LOCALIZED,pubLr1.getState());
assertEquals(ResourceState.LOCALIZED,pubLr2.getState());
spyService=createSpyService(dispatcher,dirsHandler,stateStore);
spyService.init(conf);
spyService.recoverLocalizedResources(stateStore.loadLocalizationState());
dispatcher.await();
appTracker1=spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION,user1,appId1);
privTracker1=spyService.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE,user1,null);
appTracker2=spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION,user2,appId2);
pubTracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,null,null);
LocalizedResource recoveredRsrc=privTracker1.getLocalizedResource(privReq1);
assertEquals(privReq1,recoveredRsrc.getRequest());
assertEquals(privLr1.getLocalPath(),recoveredRsrc.getLocalPath());
assertEquals(privLr1.getSize(),recoveredRsrc.getSize());
assertEquals(ResourceState.LOCALIZED,recoveredRsrc.getState());
recoveredRsrc=privTracker1.getLocalizedResource(privReq2);
assertEquals(privReq2,recoveredRsrc.getRequest());
assertEquals(privLr2.getLocalPath(),recoveredRsrc.getLocalPath());
assertEquals(privLr2.getSize(),recoveredRsrc.getSize());
assertEquals(ResourceState.LOCALIZED,recoveredRsrc.getState());
recoveredRsrc=appTracker1.getLocalizedResource(appReq1);
assertEquals(appReq1,recoveredRsrc.getRequest());
assertEquals(appLr1.getLocalPath(),recoveredRsrc.getLocalPath());
assertEquals(appLr1.getSize(),recoveredRsrc.getSize());
assertEquals(ResourceState.LOCALIZED,recoveredRsrc.getState());
recoveredRsrc=appTracker2.getLocalizedResource(appReq2);
assertNull("in-progress resource should not be present",recoveredRsrc);
recoveredRsrc=appTracker2.getLocalizedResource(appReq3);
assertEquals(appReq3,recoveredRsrc.getRequest());
assertEquals(appLr3.getLocalPath(),recoveredRsrc.getLocalPath());
assertEquals(appLr3.getSize(),recoveredRsrc.getSize());
assertEquals(ResourceState.LOCALIZED,recoveredRsrc.getState());
}
finally {
dispatcher.stop();
stateStore.close();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that two containers requesting the same PRIVATE resource do not
 * download it in parallel: the resource's semaphore admits only the first
 * localizer, and once that first attempt fails the resource moves to FAILED
 * and is never scheduled for the second localizer.
 */
@Test(timeout=100000) @SuppressWarnings("unchecked") public void testParallelDownloadAttemptsForPrivateResource() throws Exception {
DrainDispatcher dispatcher1=null;
try {
dispatcher1=new DrainDispatcher();
String user="testuser";
ApplicationId appId=BuilderUtils.newApplicationId(1,1);
// Single NM local dir under the test base directory.
List localDirs=new ArrayList();
String[] sDirs=new String[1];
for (int i=0; i < 1; ++i) {
localDirs.add(lfs.makeQualified(new Path(basedir,i + "")));
sDirs[i]=localDirs.get(i).toString();
}
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs);
LocalDirsHandlerService localDirHandler=new LocalDirsHandlerService();
localDirHandler.init(conf);
// Mock sinks for the application/container events the service emits.
EventHandler applicationBus=mock(EventHandler.class);
dispatcher1.register(ApplicationEventType.class,applicationBus);
EventHandler containerBus=mock(EventHandler.class);
dispatcher1.register(ContainerEventType.class,containerBus);
ContainerExecutor exec=mock(ContainerExecutor.class);
DeletionService delService=mock(DeletionService.class);
LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService();
dirsHandler.init(conf);
dispatcher1.init(conf);
dispatcher1.start();
ResourceLocalizationService rls=new ResourceLocalizationService(dispatcher1,exec,delService,localDirHandler,new NMNullStateStoreService());
dispatcher1.register(LocalizationEventType.class,rls);
rls.init(conf);
rls.handle(createApplicationLocalizationEvent(user,appId));
// One PRIVATE resource requested by two different containers.
LocalResourceRequest req=new LocalResourceRequest(new Path("file:///tmp"),123L,LocalResourceType.FILE,LocalResourceVisibility.PRIVATE,"");
// First container: install a LocalizerRunner keyed by its container id
// and request the resource through the dispatcher.
ContainerImpl container1=createMockContainer(user,1);
String localizerId1=container1.getContainerId().toString();
rls.getPrivateLocalizers().put(localizerId1,rls.new LocalizerRunner(new LocalizerContext(user,container1.getContainerId(),null),localizerId1));
LocalizerRunner localizerRunner1=rls.getLocalizerRunner(localizerId1);
dispatcher1.getEventHandler().handle(createContainerLocalizationEvent(container1,LocalResourceVisibility.PRIVATE,req));
Assert.assertTrue(waitForPrivateDownloadToStart(rls,localizerId1,1,200));
// Second container requests the same resource.
ContainerImpl container2=createMockContainer(user,2);
String localizerId2=container2.getContainerId().toString();
rls.getPrivateLocalizers().put(localizerId2,rls.new LocalizerRunner(new LocalizerContext(user,container2.getContainerId(),null),localizerId2));
LocalizerRunner localizerRunner2=rls.getLocalizerRunner(localizerId2);
dispatcher1.getEventHandler().handle(createContainerLocalizationEvent(container2,LocalResourceVisibility.PRIVATE,req));
Assert.assertTrue(waitForPrivateDownloadToStart(rls,localizerId2,1,200));
LocalResourcesTracker tracker=rls.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE,user,appId);
LocalizedResource lr=tracker.getLocalizedResource(req);
// Still downloading and its semaphore permit is still free.
Assert.assertEquals(ResourceState.DOWNLOADING,lr.getState());
Assert.assertEquals(1,lr.sem.availablePermits());
// First heartbeat: localizer 1 acquires the permit and is handed the spec.
LocalizerHeartbeatResponse response1=rls.heartbeat(createLocalizerStatus(localizerId1));
Assert.assertEquals(1,localizerRunner1.scheduled.size());
Assert.assertEquals(req.getResource(),response1.getResourceSpecs().get(0).getResource().getResource());
Assert.assertEquals(0,lr.sem.availablePermits());
// Localizer 2 heartbeats while the permit is held: nothing is scheduled.
LocalizerHeartbeatResponse response2=rls.heartbeat(createLocalizerStatus(localizerId2));
Assert.assertEquals(0,localizerRunner2.scheduled.size());
Assert.assertEquals(0,response2.getResourceSpecs().size());
// Localizer 1 reports failure: resource must transition to FAILED and be
// removed from localizer 1's scheduled set.
rls.heartbeat(createLocalizerStatusForFailedResource(localizerId1,req));
Assert.assertTrue(waitForResourceState(lr,rls,req,LocalResourceVisibility.PRIVATE,user,appId,ResourceState.FAILED,200));
Assert.assertTrue(lr.getState().equals(ResourceState.FAILED));
Assert.assertEquals(0,localizerRunner1.scheduled.size());
// A FAILED resource must never be re-offered to localizer 2.
response2=rls.heartbeat(createLocalizerStatus(localizerId2));
Assert.assertEquals(0,localizerRunner2.scheduled.size());
Assert.assertEquals(0,localizerRunner2.pending.size());
Assert.assertEquals(0,response2.getResourceSpecs().size());
}
finally {
if (dispatcher1 != null) {
dispatcher1.stop();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that a PUBLIC resource requested by two containers is downloaded
 * only once: while the first download holds the resource's lock the second
 * request must not start another download, and after the download fails the
 * resource is not queued again.
 */
@Test(timeout=100000) @SuppressWarnings("unchecked") public void testParallelDownloadAttemptsForPublicResource() throws Exception {
DrainDispatcher dispatcher1=null;
String user="testuser";
try {
// Single NM local dir under the test base directory.
List localDirs=new ArrayList();
String[] sDirs=new String[1];
for (int i=0; i < 1; ++i) {
localDirs.add(lfs.makeQualified(new Path(basedir,i + "")));
sDirs[i]=localDirs.get(i).toString();
}
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs);
// Mock sinks for the application/container events the service emits.
EventHandler applicationBus=mock(EventHandler.class);
dispatcher1=new DrainDispatcher();
dispatcher1.register(ApplicationEventType.class,applicationBus);
EventHandler containerBus=mock(EventHandler.class);
dispatcher1.register(ContainerEventType.class,containerBus);
ContainerExecutor exec=mock(ContainerExecutor.class);
DeletionService delService=mock(DeletionService.class);
LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService();
dirsHandler.init(conf);
dispatcher1.init(conf);
dispatcher1.start();
ResourceLocalizationService rawService=new ResourceLocalizationService(dispatcher1,exec,delService,dirsHandler,new NMNullStateStoreService());
ResourceLocalizationService spyService=spy(rawService);
dispatcher1.register(LocalizationEventType.class,spyService);
spyService.init(conf);
// No public downloads queued yet.
Assert.assertEquals(0,spyService.getPublicLocalizer().pending.size());
// One PUBLIC resource requested by two containers.
LocalResourceRequest req=new LocalResourceRequest(new Path("/tmp"),123L,LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,"");
ApplicationImpl app=mock(ApplicationImpl.class);
ApplicationId appId=BuilderUtils.newApplicationId(1,1);
when(app.getAppId()).thenReturn(appId);
when(app.getUser()).thenReturn(user);
dispatcher1.getEventHandler().handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app));
// First container triggers the (single) public download.
ContainerImpl container1=createMockContainer(user,1);
dispatcher1.getEventHandler().handle(createContainerLocalizationEvent(container1,LocalResourceVisibility.PUBLIC,req));
Assert.assertTrue(waitForResourceState(null,spyService,req,LocalResourceVisibility.PUBLIC,user,null,ResourceState.DOWNLOADING,200));
Assert.assertTrue(waitForPublicDownloadToStart(spyService,1,200));
LocalizedResource lr=getLocalizedResource(spyService,req,LocalResourceVisibility.PUBLIC,user,null);
// Download in progress: lock (semaphore permit) is held.
Assert.assertEquals(ResourceState.DOWNLOADING,lr.getState());
Assert.assertEquals(1,spyService.getPublicLocalizer().pending.size());
Assert.assertEquals(0,lr.sem.availablePermits());
// Second container requesting the same resource must NOT start a second
// download (pending size never reaches 2 within the wait window).
ContainerImpl container2=createMockContainer(user,2);
dispatcher1.getEventHandler().handle(createContainerLocalizationEvent(container2,LocalResourceVisibility.PUBLIC,req));
Assert.assertFalse(waitForPublicDownloadToStart(spyService,2,100));
// Fail the download, release the lock and clear the pending queue, then
// re-request: a FAILED resource must not be queued for download again.
ResourceFailedLocalizationEvent locFailedEvent=new ResourceFailedLocalizationEvent(req,new Exception("test").toString());
spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,user,null).handle(locFailedEvent);
Assert.assertTrue(waitForResourceState(lr,spyService,req,LocalResourceVisibility.PUBLIC,user,null,ResourceState.FAILED,200));
lr.unlock();
spyService.getPublicLocalizer().pending.clear();
LocalizerResourceRequestEvent localizerEvent=new LocalizerResourceRequestEvent(lr,null,mock(LocalizerContext.class),null);
dispatcher1.getEventHandler().handle(localizerEvent);
Assert.assertFalse(waitForPublicDownloadToStart(spyService,1,100));
Assert.assertEquals(1,lr.sem.availablePermits());
}
finally {
if (dispatcher1 != null) {
dispatcher1.stop();
}
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that resource-localization specs handed out via localizer
 * heartbeats place PRIVATE resources under the user cache
 * (usercache/&lt;user&gt;/filecache) and APPLICATION resources under the app
 * cache (usercache/&lt;user&gt;/appcache/&lt;appId&gt;/filecache).
 */
@Test(timeout=10000) @SuppressWarnings("unchecked") public void testLocalResourcePath() throws Exception {
DrainDispatcher dispatcher1=null;
try {
dispatcher1=new DrainDispatcher();
String user="testuser";
ApplicationId appId=BuilderUtils.newApplicationId(1,1);
// Single NM local dir so the expected cache paths are deterministic.
List localDirs=new ArrayList();
String[] sDirs=new String[1];
for (int i=0; i < 1; ++i) {
localDirs.add(lfs.makeQualified(new Path(basedir,i + "")));
sDirs[i]=localDirs.get(i).toString();
}
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs);
LocalDirsHandlerService localDirHandler=new LocalDirsHandlerService();
localDirHandler.init(conf);
// Mock sinks for the application/container events the service emits.
EventHandler applicationBus=mock(EventHandler.class);
dispatcher1.register(ApplicationEventType.class,applicationBus);
EventHandler containerBus=mock(EventHandler.class);
dispatcher1.register(ContainerEventType.class,containerBus);
ContainerExecutor exec=mock(ContainerExecutor.class);
DeletionService delService=mock(DeletionService.class);
LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService();
dirsHandler.init(conf);
dispatcher1.init(conf);
dispatcher1.start();
ResourceLocalizationService rls=new ResourceLocalizationService(dispatcher1,exec,delService,localDirHandler,new NMNullStateStoreService());
dispatcher1.register(LocalizationEventType.class,rls);
rls.init(conf);
rls.handle(createApplicationLocalizationEvent(user,appId));
// One container requesting one PRIVATE and one APPLICATION resource.
Container container1=createMockContainer(user,1);
String localizerId1=container1.getContainerId().toString();
rls.getPrivateLocalizers().put(localizerId1,rls.new LocalizerRunner(new LocalizerContext(user,container1.getContainerId(),null),localizerId1));
LocalResourceRequest reqPriv=new LocalResourceRequest(new Path("file:///tmp1"),123L,LocalResourceType.FILE,LocalResourceVisibility.PRIVATE,"");
List privList=new ArrayList();
privList.add(reqPriv);
LocalResourceRequest reqApp=new LocalResourceRequest(new Path("file:///tmp2"),123L,LocalResourceType.FILE,LocalResourceVisibility.APPLICATION,"");
List appList=new ArrayList();
appList.add(reqApp);
Map> rsrcs=new HashMap>();
rsrcs.put(LocalResourceVisibility.APPLICATION,appList);
rsrcs.put(LocalResourceVisibility.PRIVATE,privList);
dispatcher1.getEventHandler().handle(new ContainerLocalizationRequestEvent(container1,rsrcs));
Assert.assertTrue(waitForPrivateDownloadToStart(rls,localizerId1,2,500));
// Expected parent directories for each visibility, rooted at local dir 0.
String userCachePath=StringUtils.join(Path.SEPARATOR,Arrays.asList(localDirs.get(0).toUri().getRawPath(),ContainerLocalizer.USERCACHE,user,ContainerLocalizer.FILECACHE));
String userAppCachePath=StringUtils.join(Path.SEPARATOR,Arrays.asList(localDirs.get(0).toUri().getRawPath(),ContainerLocalizer.USERCACHE,user,ContainerLocalizer.APPCACHE,appId.toString(),ContainerLocalizer.FILECACHE));
// Heartbeat until both resource specs have been handed out; specs may
// arrive over multiple heartbeats, hence the loop.
int returnedResources=0;
boolean appRsrc=false, privRsrc=false;
while (returnedResources < 2) {
LocalizerHeartbeatResponse response=rls.heartbeat(createLocalizerStatus(localizerId1));
for ( ResourceLocalizationSpec resourceSpec : response.getResourceSpecs()) {
returnedResources++;
Path destinationDirectory=new Path(resourceSpec.getDestinationDirectory().getFile());
if (resourceSpec.getResource().getVisibility() == LocalResourceVisibility.APPLICATION) {
appRsrc=true;
Assert.assertEquals(userAppCachePath,destinationDirectory.getParent().toUri().toString());
}
else if (resourceSpec.getResource().getVisibility() == LocalResourceVisibility.PRIVATE) {
privRsrc=true;
Assert.assertEquals(userCachePath,destinationDirectory.getParent().toUri().toString());
}
else {
throw new Exception("Unexpected resource recevied.");
}
}
}
// Both visibilities must have been observed.
Assert.assertTrue(appRsrc && privRsrc);
}
finally {
if (dispatcher1 != null) {
dispatcher1.stop();
}
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier
@Test @SuppressWarnings("unchecked") public void testPublicResourceAddResourceExceptions() throws Exception {
List localDirs=new ArrayList();
String[] sDirs=new String[4];
for (int i=0; i < 4; ++i) {
localDirs.add(lfs.makeQualified(new Path(basedir,i + "")));
sDirs[i]=localDirs.get(i).toString();
}
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs);
conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY,true);
DrainDispatcher dispatcher=new DrainDispatcher();
EventHandler applicationBus=mock(EventHandler.class);
dispatcher.register(ApplicationEventType.class,applicationBus);
EventHandler containerBus=mock(EventHandler.class);
dispatcher.register(ContainerEventType.class,containerBus);
ContainerExecutor exec=mock(ContainerExecutor.class);
DeletionService delService=mock(DeletionService.class);
LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService();
LocalDirsHandlerService dirsHandlerSpy=spy(dirsHandler);
dirsHandlerSpy.init(conf);
dispatcher.init(conf);
dispatcher.start();
try {
ResourceLocalizationService rawService=new ResourceLocalizationService(dispatcher,exec,delService,dirsHandlerSpy,new NMNullStateStoreService());
ResourceLocalizationService spyService=spy(rawService);
doReturn(mockServer).when(spyService).createServer();
doReturn(lfs).when(spyService).getLocalFileContext(isA(Configuration.class));
spyService.init(conf);
spyService.start();
final String user="user0";
final Application app=mock(Application.class);
final ApplicationId appId=BuilderUtils.newApplicationId(314159265358979L,3);
when(app.getUser()).thenReturn(user);
when(app.getAppId()).thenReturn(appId);
spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app));
dispatcher.await();
Random r=new Random();
r.setSeed(r.nextLong());
final LocalResource pubResource=getPublicMockedResource(r);
final LocalResourceRequest pubReq=new LocalResourceRequest(pubResource);
Map> req=new HashMap>();
req.put(LocalResourceVisibility.PUBLIC,Collections.singletonList(pubReq));
final Container c=getMockContainer(appId,42,user);
Mockito.doThrow(new IOException()).when(dirsHandlerSpy).getLocalPathForWrite(isA(String.class),Mockito.anyLong(),Mockito.anyBoolean());
spyService.handle(new ContainerLocalizationRequestEvent(c,req));
dispatcher.await();
LocalResourcesTracker tracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,user,appId);
Assert.assertNull(tracker.getLocalizedResource(pubReq));
Mockito.doCallRealMethod().when(dirsHandlerSpy).getLocalPathForWrite(isA(String.class),Mockito.anyLong(),Mockito.anyBoolean());
PublicLocalizer publicLocalizer=spyService.getPublicLocalizer();
publicLocalizer.threadPool.shutdown();
spyService.handle(new ContainerLocalizationRequestEvent(c,req));
dispatcher.await();
tracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,user,appId);
Assert.assertNull(tracker.getLocalizedResource(pubReq));
}
finally {
dispatcher.await();
dispatcher.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Drives the full localizer heartbeat protocol for one container with two
 * PRIVATE resources: each heartbeat hands out at most one resource spec with
 * a destination under the user file cache, an empty-status heartbeat after
 * all work is done answers DIE, the container receives two
 * RESOURCE_LOCALIZED events, and the localizer token file is deleted.
 */
@Test(timeout=10000) @SuppressWarnings("unchecked") public void testLocalizationHeartbeat() throws Exception {
// Single NM local dir; cap files-per-directory at 37 so the expected
// destination subdirectories ("10", then "0/11") are deterministic.
List localDirs=new ArrayList();
String[] sDirs=new String[1];
localDirs.add(lfs.makeQualified(new Path(basedir,0 + "")));
sDirs[0]=localDirs.get(0).toString();
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs);
conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY,"37");
DrainDispatcher dispatcher=new DrainDispatcher();
dispatcher.init(conf);
dispatcher.start();
// Mock sinks for the application/container events the service emits.
EventHandler applicationBus=mock(EventHandler.class);
dispatcher.register(ApplicationEventType.class,applicationBus);
EventHandler containerBus=mock(EventHandler.class);
dispatcher.register(ContainerEventType.class,containerBus);
ContainerExecutor exec=mock(ContainerExecutor.class);
LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService();
dirsHandler.init(conf);
// Real DeletionService wrapped in a spy so the token-file deletion at the
// end can be verified.
DeletionService delServiceReal=new DeletionService(exec);
DeletionService delService=spy(delServiceReal);
delService.init(new Configuration());
delService.start();
ResourceLocalizationService rawService=new ResourceLocalizationService(dispatcher,exec,delService,dirsHandler,new NMNullStateStoreService());
ResourceLocalizationService spyService=spy(rawService);
doReturn(mockServer).when(spyService).createServer();
doReturn(lfs).when(spyService).getLocalFileContext(isA(Configuration.class));
try {
spyService.init(conf);
spyService.start();
final Application app=mock(Application.class);
final ApplicationId appId=BuilderUtils.newApplicationId(314159265358979L,3);
when(app.getUser()).thenReturn("user0");
when(app.getAppId()).thenReturn(appId);
spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app));
// Matcher for the APPLICATION_INITED event sent back for this appId.
ArgumentMatcher matchesAppInit=new ArgumentMatcher(){
@Override public boolean matches( Object o){
ApplicationEvent evt=(ApplicationEvent)o;
return evt.getType() == ApplicationEventType.APPLICATION_INITED && appId == evt.getApplicationID();
}
}
;
dispatcher.await();
verify(applicationBus).handle(argThat(matchesAppInit));
// Two distinct randomized PRIVATE resources (seed printed for replay).
Random r=new Random();
long seed=r.nextLong();
System.out.println("SEED: " + seed);
r.setSeed(seed);
final Container c=getMockContainer(appId,42,"user0");
FSDataOutputStream out=new FSDataOutputStream(new DataOutputBuffer(),null);
doReturn(out).when(spylfs).createInternal(isA(Path.class),isA(EnumSet.class),isA(FsPermission.class),anyInt(),anyShort(),anyLong(),isA(Progressable.class),isA(ChecksumOpt.class),anyBoolean());
final LocalResource resource1=getPrivateMockedResource(r);
LocalResource resource2=null;
do {
resource2=getPrivateMockedResource(r);
}
 while (resource2 == null || resource2.equals(resource1));
final LocalResourceRequest req1=new LocalResourceRequest(resource1);
final LocalResourceRequest req2=new LocalResourceRequest(resource2);
Map> rsrcs=new HashMap>();
List privateResourceList=new ArrayList();
privateResourceList.add(req1);
privateResourceList.add(req2);
rsrcs.put(LocalResourceVisibility.PRIVATE,privateResourceList);
spyService.handle(new ContainerLocalizationRequestEvent(c,rsrcs));
Thread.sleep(1000);
dispatcher.await();
// The localizer process must have been started for this container with
// a token file; capture that path to verify its deletion later.
String appStr=ConverterUtils.toString(appId);
String ctnrStr=c.getContainerId().toString();
ArgumentCaptor tokenPathCaptor=ArgumentCaptor.forClass(Path.class);
verify(exec).startLocalizer(tokenPathCaptor.capture(),isA(InetSocketAddress.class),eq("user0"),eq(appStr),eq(ctnrStr),isA(List.class),isA(List.class));
Path localizationTokenPath=tokenPathCaptor.getValue();
// Mock localizer status: first heartbeat reports nothing, then success
// for resource1, then success for resource2, then nothing again.
LocalResourceStatus rsrcStat1=mock(LocalResourceStatus.class);
LocalResourceStatus rsrcStat2=mock(LocalResourceStatus.class);
LocalizerStatus stat=mock(LocalizerStatus.class);
when(stat.getLocalizerId()).thenReturn(ctnrStr);
when(rsrcStat1.getResource()).thenReturn(resource1);
when(rsrcStat2.getResource()).thenReturn(resource2);
when(rsrcStat1.getLocalSize()).thenReturn(4344L);
when(rsrcStat2.getLocalSize()).thenReturn(2342L);
URL locPath=getPath("/cache/private/blah");
when(rsrcStat1.getLocalPath()).thenReturn(locPath);
when(rsrcStat2.getLocalPath()).thenReturn(locPath);
when(rsrcStat1.getStatus()).thenReturn(ResourceStatusType.FETCH_SUCCESS);
when(rsrcStat2.getStatus()).thenReturn(ResourceStatusType.FETCH_SUCCESS);
when(stat.getResources()).thenReturn(Collections.emptyList()).thenReturn(Collections.singletonList(rsrcStat1)).thenReturn(Collections.singletonList(rsrcStat2)).thenReturn(Collections.emptyList());
String localPath=Path.SEPARATOR + ContainerLocalizer.USERCACHE + Path.SEPARATOR+ "user0"+ Path.SEPARATOR+ ContainerLocalizer.FILECACHE;
// Heartbeat 1: LIVE, hands out req1 destined for .../filecache/10.
LocalizerHeartbeatResponse response=spyService.heartbeat(stat);
assertEquals(LocalizerAction.LIVE,response.getLocalizerAction());
assertEquals(1,response.getResourceSpecs().size());
assertEquals(req1,new LocalResourceRequest(response.getResourceSpecs().get(0).getResource()));
URL localizedPath=response.getResourceSpecs().get(0).getDestinationDirectory();
assertTrue(localizedPath.getFile().endsWith(localPath + Path.SEPARATOR + "10"));
// Heartbeat 2: LIVE, hands out req2 destined for .../filecache/0/11.
response=spyService.heartbeat(stat);
assertEquals(LocalizerAction.LIVE,response.getLocalizerAction());
assertEquals(1,response.getResourceSpecs().size());
assertEquals(req2,new LocalResourceRequest(response.getResourceSpecs().get(0).getResource()));
localizedPath=response.getResourceSpecs().get(0).getDestinationDirectory();
assertTrue(localizedPath.getFile().endsWith(localPath + Path.SEPARATOR + "0"+ Path.SEPARATOR+ "11"));
// Heartbeat 3: LIVE but nothing left to hand out.
response=spyService.heartbeat(stat);
assertEquals(LocalizerAction.LIVE,response.getLocalizerAction());
assertEquals(0,response.getResourceSpecs().size());
// Heartbeat 4: all done, localizer is told to DIE.
response=spyService.heartbeat(stat);
assertEquals(LocalizerAction.DIE,response.getLocalizerAction());
dispatcher.await();
// Matcher for RESOURCE_LOCALIZED events aimed at this container.
ArgumentMatcher matchesContainerLoc=new ArgumentMatcher(){
@Override public boolean matches( Object o){
ContainerEvent evt=(ContainerEvent)o;
return evt.getType() == ContainerEventType.RESOURCE_LOCALIZED && c.getContainerId() == evt.getContainerID();
}
}
;
// One localized event per resource, and the token file deleted.
verify(containerBus,times(2)).handle(argThat(matchesContainerLoc));
verify(delService).delete((String)isNull(),eq(localizationTokenPath));
}
finally {
spyService.stop();
dispatcher.stop();
delService.stop();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test @SuppressWarnings("unchecked") public void testResourceRelease() throws Exception {
List localDirs=new ArrayList();
String[] sDirs=new String[4];
for (int i=0; i < 4; ++i) {
localDirs.add(lfs.makeQualified(new Path(basedir,i + "")));
sDirs[i]=localDirs.get(i).toString();
}
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs);
LocalizerTracker mockLocallilzerTracker=mock(LocalizerTracker.class);
DrainDispatcher dispatcher=new DrainDispatcher();
dispatcher.init(conf);
dispatcher.start();
EventHandler applicationBus=mock(EventHandler.class);
dispatcher.register(ApplicationEventType.class,applicationBus);
EventHandler containerBus=mock(EventHandler.class);
dispatcher.register(ContainerEventType.class,containerBus);
EventHandler localizerBus=mock(EventHandler.class);
dispatcher.register(LocalizerEventType.class,localizerBus);
ContainerExecutor exec=mock(ContainerExecutor.class);
LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService();
dirsHandler.init(conf);
DeletionService delService=new DeletionService(exec);
delService.init(new Configuration());
delService.start();
ResourceLocalizationService rawService=new ResourceLocalizationService(dispatcher,exec,delService,dirsHandler,new NMNullStateStoreService());
ResourceLocalizationService spyService=spy(rawService);
doReturn(mockServer).when(spyService).createServer();
doReturn(mockLocallilzerTracker).when(spyService).createLocalizerTracker(isA(Configuration.class));
doReturn(lfs).when(spyService).getLocalFileContext(isA(Configuration.class));
try {
spyService.init(conf);
spyService.start();
final String user="user0";
final Application app=mock(Application.class);
final ApplicationId appId=BuilderUtils.newApplicationId(314159265358979L,3);
when(app.getUser()).thenReturn(user);
when(app.getAppId()).thenReturn(appId);
spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app));
dispatcher.await();
LocalResourcesTracker appTracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION,user,appId);
LocalResourcesTracker privTracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE,user,appId);
LocalResourcesTracker pubTracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,user,appId);
final Container c=getMockContainer(appId,42,user);
Random r=new Random();
long seed=r.nextLong();
System.out.println("SEED: " + seed);
r.setSeed(seed);
final LocalResource privResource=getPrivateMockedResource(r);
final LocalResourceRequest privReq=new LocalResourceRequest(privResource);
final LocalResource pubResource=getPublicMockedResource(r);
final LocalResourceRequest pubReq=new LocalResourceRequest(pubResource);
final LocalResource pubResource2=getPublicMockedResource(r);
final LocalResourceRequest pubReq2=new LocalResourceRequest(pubResource2);
final LocalResource appResource=getAppMockedResource(r);
final LocalResourceRequest appReq=new LocalResourceRequest(appResource);
Map> req=new HashMap>();
req.put(LocalResourceVisibility.PRIVATE,Collections.singletonList(privReq));
req.put(LocalResourceVisibility.PUBLIC,Collections.singletonList(pubReq));
req.put(LocalResourceVisibility.APPLICATION,Collections.singletonList(appReq));
Map> req2=new HashMap>();
req2.put(LocalResourceVisibility.PRIVATE,Collections.singletonList(privReq));
req2.put(LocalResourceVisibility.PUBLIC,Collections.singletonList(pubReq2));
Set pubRsrcs=new HashSet();
pubRsrcs.add(pubReq);
pubRsrcs.add(pubReq2);
spyService.handle(new ContainerLocalizationRequestEvent(c,req));
spyService.handle(new ContainerLocalizationRequestEvent(c,req2));
dispatcher.await();
int privRsrcCount=0;
for ( LocalizedResource lr : privTracker) {
privRsrcCount++;
Assert.assertEquals("Incorrect reference count",2,lr.getRefCount());
Assert.assertEquals(privReq,lr.getRequest());
}
Assert.assertEquals(1,privRsrcCount);
int pubRsrcCount=0;
for ( LocalizedResource lr : pubTracker) {
pubRsrcCount++;
Assert.assertEquals("Incorrect reference count",1,lr.getRefCount());
pubRsrcs.remove(lr.getRequest());
}
Assert.assertEquals(0,pubRsrcs.size());
Assert.assertEquals(2,pubRsrcCount);
int appRsrcCount=0;
for ( LocalizedResource lr : appTracker) {
appRsrcCount++;
Assert.assertEquals("Incorrect reference count",1,lr.getRefCount());
Assert.assertEquals(appReq,lr.getRequest());
}
Assert.assertEquals(1,appRsrcCount);
spyService.handle(new ContainerLocalizationCleanupEvent(c,req));
verify(mockLocallilzerTracker).cleanupPrivLocalizers("container_314159265358979_0003_01_000042");
req2.remove(LocalResourceVisibility.PRIVATE);
spyService.handle(new ContainerLocalizationCleanupEvent(c,req2));
dispatcher.await();
pubRsrcs.add(pubReq);
pubRsrcs.add(pubReq2);
privRsrcCount=0;
for ( LocalizedResource lr : privTracker) {
privRsrcCount++;
Assert.assertEquals("Incorrect reference count",1,lr.getRefCount());
Assert.assertEquals(privReq,lr.getRequest());
}
Assert.assertEquals(1,privRsrcCount);
pubRsrcCount=0;
for ( LocalizedResource lr : pubTracker) {
pubRsrcCount++;
Assert.assertEquals("Incorrect reference count",0,lr.getRefCount());
pubRsrcs.remove(lr.getRequest());
}
Assert.assertEquals(0,pubRsrcs.size());
Assert.assertEquals(2,pubRsrcCount);
appRsrcCount=0;
for ( LocalizedResource lr : appTracker) {
appRsrcCount++;
Assert.assertEquals("Incorrect reference count",0,lr.getRefCount());
Assert.assertEquals(appReq,lr.getRequest());
}
Assert.assertEquals(1,appRsrcCount);
}
finally {
dispatcher.stop();
delService.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that once an application finishes, its logs are aggregated to the
 * remote log dir and the local per-container log files are handed to the
 * DeletionService for removal.
 */
@Test @SuppressWarnings("unchecked") public void testLocalFileDeletionAfterUpload() throws Exception {
// Spy on the deletion service so the delete() call can be verified below.
this.delSrvc=new DeletionService(createContainerExecutor());
delSrvc=spy(delSrvc);
this.delSrvc.init(conf);
this.conf.set(YarnConfiguration.NM_LOG_DIRS,localLogDir.getAbsolutePath());
this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,this.remoteRootLogDir.getAbsolutePath());
DrainDispatcher dispatcher=createDispatcher();
EventHandler appEventHandler=mock(EventHandler.class);
dispatcher.register(ApplicationEventType.class,appEventHandler);
LogAggregationService logAggregationService=spy(new LogAggregationService(dispatcher,this.context,this.delSrvc,super.dirsHandler));
logAggregationService.init(this.conf);
logAggregationService.start();
ApplicationId application1=BuilderUtils.newApplicationId(1234,1);
// Create the app's local log dir and write logs for a single container.
File app1LogDir=new File(localLogDir,ConverterUtils.toString(application1));
app1LogDir.mkdir();
logAggregationService.handle(new LogHandlerAppStartedEvent(application1,this.user,null,ContainerLogsRetentionPolicy.ALL_CONTAINERS,this.acls));
ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(application1,1);
ContainerId container11=BuilderUtils.newContainerId(appAttemptId,1);
writeContainerLogs(app1LogDir,container11);
// Finish the container and the app, then stop the service so that any
// pending aggregation work completes before the assertions below.
logAggregationService.handle(new LogHandlerContainerFinishedEvent(container11,0));
logAggregationService.handle(new LogHandlerAppFinishedEvent(application1));
logAggregationService.stop();
assertEquals(0,logAggregationService.getNumAggregators());
verify(logAggregationService).closeFileSystems(any(UserGroupInformation.class));
// The local app log dir must have been scheduled for deletion.
verify(delSrvc).delete(eq(user),eq((Path)null),eq(new Path(app1LogDir.getAbsolutePath())));
delSrvc.stop();
String containerIdStr=ConverterUtils.toString(container11);
File containerLogDir=new File(app1LogDir,containerIdStr);
// Every per-container log file, and the app dir itself, should be gone.
for ( String fileType : new String[]{"stdout","stderr","syslog"}) {
File f=new File(containerLogDir,fileType);
Assert.assertFalse("check " + f,f.exists());
}
Assert.assertFalse(app1LogDir.exists());
// The aggregated log file must now exist at the remote location.
Path logFilePath=logAggregationService.getRemoteNodeLogFileForApp(application1,this.user);
Assert.assertTrue("Log file [" + logFilePath + "] not found",new File(logFilePath.toUri().getPath()).exists());
dispatcher.await();
// The app handler should have seen exactly INITED followed by FINISHED.
ApplicationEvent expectedEvents[]=new ApplicationEvent[]{new ApplicationEvent(appAttemptId.getApplicationId(),ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),new ApplicationEvent(appAttemptId.getApplicationId(),ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)};
checkEvents(appEventHandler,expectedEvents,true,"getType","getApplicationID");
dispatcher.stop();
}
APIUtilityVerifier BooleanVerifier
/**
 * Verifies that verifyAndCreateRemoteLogDir creates the configured remote
 * aggregation root directory when it does not already exist.
 */
@Test public void testVerifyAndCreateRemoteDirNonExistence() throws Exception {
this.conf.set(YarnConfiguration.NM_LOG_DIRS,localLogDir.getAbsolutePath());
// Timestamped name so the directory cannot pre-exist.
// (Dropped the redundant String.valueOf() around a String expression.)
File aNewFile=new File("tmp" + System.currentTimeMillis());
this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,aNewFile.getAbsolutePath());
DrainDispatcher dispatcher=createDispatcher();
LogAggregationService logAggregationService=spy(new LogAggregationService(dispatcher,this.context,this.delSrvc,super.dirsHandler));
logAggregationService.init(this.conf);
// assertFalse instead of assertTrue(msg, !x) — same check, clearer intent.
assertFalse("The new file already exists!",aNewFile.exists());
logAggregationService.verifyAndCreateRemoteLogDir(this.conf);
assertTrue("The new aggregate file is not successfully created",aNewFile.exists());
// Best-effort cleanup of the directory created above.
aNewFile.delete();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test to verify the check for whether a process tree is over limit or not.
 * @throws IOException if there was a problem setting up the fake procfs directories or
 * files.
 */
@Test public void testProcessTreeLimits() throws IOException {
File fakeProcfsRoot=new File(localDir,"proc");
String[] processIds={"100","200","300","400","500","600","700"};
try {
// Build a fake procfs containing three separate process trees:
//   100 -> 500        (second sample pushes it past the limit immediately)
//   200 -> {300,400}  (crosses the limit only on the second sample)
//   600 -> 700        (stays well under the limit throughout)
TestProcfsBasedProcessTree.setupProcfsRootDir(fakeProcfsRoot);
TestProcfsBasedProcessTree.setupPidDirs(fakeProcfsRoot,processIds);
TestProcfsBasedProcessTree.ProcessStatInfo[] statInfos=new TestProcfsBasedProcessTree.ProcessStatInfo[7];
statInfos[0]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"100","proc1","1","100","100","100000"});
statInfos[1]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"200","proc2","1","200","200","200000"});
statInfos[2]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"300","proc3","200","200","200","300000"});
statInfos[3]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"400","proc4","200","200","200","400000"});
statInfos[4]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"500","proc5","100","100","100","1500000"});
statInfos[5]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"600","proc6","1","600","600","100000"});
statInfos[6]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"700","proc7","600","600","600","100000"});
TestProcfsBasedProcessTree.writeStatFiles(fakeProcfsRoot,processIds,statInfos,null);
long memoryLimit=700000;
ContainersMonitorImpl monitor=new ContainersMonitorImpl(null,null,null);
// Tree rooted at 100 exceeds the limit as soon as it is sampled.
ProcfsBasedProcessTree processTree=new ProcfsBasedProcessTree("100",fakeProcfsRoot.getAbsolutePath());
processTree.updateProcessTree();
assertTrue("tree rooted at 100 should be over limit after first iteration.",monitor.isProcessTreeOverLimit(processTree,"dummyId",memoryLimit));
// Tree rooted at 200 is under the limit after one sample, over after two.
processTree=new ProcfsBasedProcessTree("200",fakeProcfsRoot.getAbsolutePath());
processTree.updateProcessTree();
assertFalse("tree rooted at 200 shouldn't be over limit after one iteration.",monitor.isProcessTreeOverLimit(processTree,"dummyId",memoryLimit));
processTree.updateProcessTree();
assertTrue("tree rooted at 200 should be over limit after 2 iterations",monitor.isProcessTreeOverLimit(processTree,"dummyId",memoryLimit));
// Tree rooted at 600 never exceeds the limit, however often it is sampled.
processTree=new ProcfsBasedProcessTree("600",fakeProcfsRoot.getAbsolutePath());
processTree.updateProcessTree();
assertFalse("tree rooted at 600 should never be over limit.",monitor.isProcessTreeOverLimit(processTree,"dummyId",memoryLimit));
processTree.updateProcessTree();
assertFalse("tree rooted at 600 should never be over limit.",monitor.isProcessTreeOverLimit(processTree,"dummyId",memoryLimit));
}
finally {
FileUtil.fullyDelete(fakeProcfsRoot);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Launches a container constrained to a tiny memory allocation, waits for the
 * monitor to kill it for exceeding virtual memory limits, and verifies the
 * exit status, the diagnostics message, and that the process is dead.
 */
@Test public void testContainerKillOnMemoryOverflow() throws IOException, InterruptedException, YarnException {
// ProcfsBasedProcessTree is platform-dependent; skip where unavailable.
if (!ProcfsBasedProcessTree.isAvailable()) {
return;
}
containerManager.start();
File scriptFile=new File(tmpDir,"scriptFile.sh");
File processStartFile=new File(tmpDir,"start_file.txt").getAbsoluteFile();
// Script records its own pid then sleeps so the monitor can observe it.
// Close the writer in finally so the file handle is not leaked if a write fails.
PrintWriter fileWriter=new PrintWriter(scriptFile);
try {
fileWriter.write("\numask 0");
fileWriter.write("\necho Hello World! > " + processStartFile);
fileWriter.write("\necho $$ >> " + processStartFile);
fileWriter.write("\nsleep 15");
}
finally {
fileWriter.close();
}
ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class);
ApplicationId appId=ApplicationId.newInstance(0,0);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1);
ContainerId cId=ContainerId.newInstance(appAttemptId,0);
URL resource_alpha=ConverterUtils.getYarnUrlFromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
LocalResource rsrc_alpha=recordFactory.newRecordInstance(LocalResource.class);
rsrc_alpha.setResource(resource_alpha);
rsrc_alpha.setSize(-1);
rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
rsrc_alpha.setType(LocalResourceType.FILE);
rsrc_alpha.setTimestamp(scriptFile.lastModified());
String destinationFile="dest_file";
Map<String,LocalResource> localResources=new HashMap<String,LocalResource>();
localResources.put(destinationFile,rsrc_alpha);
containerLaunchContext.setLocalResources(localResources);
List<String> commands=new ArrayList<String>();
commands.add("/bin/bash");
commands.add(scriptFile.getAbsolutePath());
containerLaunchContext.setCommands(commands);
// 8 MB allocation — small enough that the bash process tree exceeds it.
Resource r=BuilderUtils.newResource(8 * 1024 * 1024,1);
ContainerTokenIdentifier containerIdentifier=new ContainerTokenIdentifier(cId,context.getNodeId().toString(),user,r,System.currentTimeMillis() + 120000,123,DUMMY_RM_IDENTIFIER,Priority.newInstance(0),0);
Token containerToken=BuilderUtils.newContainerToken(context.getNodeId(),containerManager.getContext().getContainerTokenSecretManager().createPassword(containerIdentifier),containerIdentifier);
StartContainerRequest scRequest=StartContainerRequest.newInstance(containerLaunchContext,containerToken);
List<StartContainerRequest> list=new ArrayList<StartContainerRequest>();
list.add(scRequest);
StartContainersRequest allRequests=StartContainersRequest.newInstance(list);
containerManager.startContainers(allRequests);
// Poll (up to ~20s) for the script to signal that it has started.
int timeoutSecs=0;
while (!processStartFile.exists() && timeoutSecs++ < 20) {
Thread.sleep(1000);
LOG.info("Waiting for process start-file to be created");
}
Assert.assertTrue("ProcessStartFile doesn't exist!",processStartFile.exists());
// Read the greeting and the pid; close the reader to avoid leaking it.
BufferedReader reader=new BufferedReader(new FileReader(processStartFile));
String pid;
try {
Assert.assertEquals("Hello World!",reader.readLine());
pid=reader.readLine().trim();
Assert.assertEquals(null,reader.readLine());
}
finally {
reader.close();
}
BaseContainerManagerTest.waitForContainerState(containerManager,cId,ContainerState.COMPLETE,60);
List<ContainerId> containerIds=new ArrayList<ContainerId>();
containerIds.add(cId);
GetContainerStatusesRequest gcsRequest=GetContainerStatusesRequest.newInstance(containerIds);
ContainerStatus containerStatus=containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
Assert.assertEquals(ContainerExitStatus.KILLED_EXCEEDED_VMEM,containerStatus.getExitStatus());
// Diagnostics should match the monitor's kill message for this container.
String expectedMsgPattern="Container \\[pid=" + pid + ",containerID="+ cId+ "\\] is running beyond virtual memory limits. Current usage: "+ "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B physical memory used; "+ "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B virtual memory used. "+ "Killing container.\nDump of the process-tree for "+ cId+ " :\n";
Pattern pat=Pattern.compile(expectedMsgPattern);
Assert.assertTrue("Expected message pattern is: " + expectedMsgPattern + "\n\nObserved message is: "+ containerStatus.getDiagnostics(),pat.matcher(containerStatus.getDiagnostics()).find());
Assert.assertFalse("Process is still alive!",exec.signalContainer(user,pid,Signal.NULL));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that removeLocalizedResource deletes state store entries for both
 * completed and in-progress localizations (app, public, and private) while
 * leaving other tracked resources intact across a restart.
 */
@Test public void testRemoveLocalizedResource() throws IOException {
String user="somebody";
ApplicationId appId=ApplicationId.newInstance(1,1);
// Remove a COMPLETED app-level resource; the store should then be empty.
Path appRsrcPath=new Path("hdfs://some/app/resource");
LocalResourcePBImpl rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(appRsrcPath),LocalResourceType.ARCHIVE,LocalResourceVisibility.APPLICATION,123L,456L);
LocalResourceProto appRsrcProto=rsrcPb.getProto();
Path appRsrcLocalPath=new Path("/some/local/dir/for/apprsrc");
stateStore.startResourceLocalization(user,appId,appRsrcProto,appRsrcLocalPath);
LocalizedResourceProto appLocalizedProto=LocalizedResourceProto.newBuilder().setResource(appRsrcProto).setLocalPath(appRsrcLocalPath.toString()).setSize(1234567L).build();
stateStore.finishResourceLocalization(user,appId,appLocalizedProto);
stateStore.removeLocalizedResource(user,appId,appRsrcLocalPath);
restartStateStore();
verifyEmptyState();
// Remove the same resource while localization is still IN PROGRESS.
stateStore.startResourceLocalization(user,appId,appRsrcProto,appRsrcLocalPath);
stateStore.removeLocalizedResource(user,appId,appRsrcLocalPath);
restartStateStore();
verifyEmptyState();
// Complete two public resources (null user/app selects the public
// tracker) and remove only the second one.
Path pubRsrcPath1=new Path("hdfs://some/public/resource1");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath1),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
LocalResourceProto pubRsrcProto1=rsrcPb.getProto();
Path pubRsrcLocalPath1=new Path("/some/local/dir/for/pubrsrc1");
stateStore.startResourceLocalization(null,null,pubRsrcProto1,pubRsrcLocalPath1);
LocalizedResourceProto pubLocalizedProto1=LocalizedResourceProto.newBuilder().setResource(pubRsrcProto1).setLocalPath(pubRsrcLocalPath1.toString()).setSize(789L).build();
stateStore.finishResourceLocalization(null,null,pubLocalizedProto1);
Path pubRsrcPath2=new Path("hdfs://some/public/resource2");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath2),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
LocalResourceProto pubRsrcProto2=rsrcPb.getProto();
Path pubRsrcLocalPath2=new Path("/some/local/dir/for/pubrsrc2");
stateStore.startResourceLocalization(null,null,pubRsrcProto2,pubRsrcLocalPath2);
LocalizedResourceProto pubLocalizedProto2=LocalizedResourceProto.newBuilder().setResource(pubRsrcProto2).setLocalPath(pubRsrcLocalPath2.toString()).setSize(7654321L).build();
stateStore.finishResourceLocalization(null,null,pubLocalizedProto2);
stateStore.removeLocalizedResource(null,null,pubRsrcLocalPath2);
// Start and immediately remove an in-progress private resource.
Path privRsrcPath=new Path("hdfs://some/private/resource");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(privRsrcPath),LocalResourceType.PATTERN,LocalResourceVisibility.PRIVATE,789L,680L,"*pattern*");
LocalResourceProto privRsrcProto=rsrcPb.getProto();
Path privRsrcLocalPath=new Path("/some/local/dir/for/privrsrc");
stateStore.startResourceLocalization(user,null,privRsrcProto,privRsrcLocalPath);
stateStore.removeLocalizedResource(user,null,privRsrcLocalPath);
restartStateStore();
// After recovery only the first public resource should remain.
RecoveredLocalizationState state=stateStore.loadLocalizationState();
LocalResourceTrackerState pubts=state.getPublicTrackerState();
assertTrue(pubts.getInProgressResources().isEmpty());
assertEquals(1,pubts.getLocalizedResources().size());
assertEquals(pubLocalizedProto1,pubts.getLocalizedResources().iterator().next());
Map userResources=state.getUserResources();
assertTrue(userResources.isEmpty());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises the container lifecycle in the state store: store a start
 * request, then record launched, killed, and completed transitions, and
 * verify each recovered state survives a state store restart until the
 * container entry is removed.
 */
@Test public void testContainerStorage() throws IOException {
// A fresh store should recover no containers.
List recoveredContainers=stateStore.loadContainersState();
assertTrue(recoveredContainers.isEmpty());
ApplicationId appId=ApplicationId.newInstance(1234,3);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,4);
ContainerId containerId=ContainerId.newInstance(appAttemptId,5);
// Build a fully-populated launch context (resources, env, commands,
// service data, tokens, ACLs) so the round trip covers every field.
LocalResource lrsrc=LocalResource.newInstance(URL.newInstance("hdfs","somehost",12345,"/some/path/to/rsrc"),LocalResourceType.FILE,LocalResourceVisibility.APPLICATION,123L,1234567890L);
Map localResources=new HashMap();
localResources.put("rsrc",lrsrc);
Map env=new HashMap();
env.put("somevar","someval");
List containerCmds=new ArrayList();
containerCmds.add("somecmd");
containerCmds.add("somearg");
Map serviceData=new HashMap();
serviceData.put("someservice",ByteBuffer.wrap(new byte[]{0x1,0x2,0x3}));
ByteBuffer containerTokens=ByteBuffer.wrap(new byte[]{0x7,0x8,0x9,0xa});
Map acls=new HashMap();
acls.put(ApplicationAccessType.VIEW_APP,"viewuser");
acls.put(ApplicationAccessType.MODIFY_APP,"moduser");
ContainerLaunchContext clc=ContainerLaunchContext.newInstance(localResources,env,containerCmds,serviceData,containerTokens,acls);
Resource containerRsrc=Resource.newInstance(1357,3);
ContainerTokenIdentifier containerTokenId=new ContainerTokenIdentifier(containerId,"host","user",containerRsrc,9876543210L,42,2468,Priority.newInstance(7),13579);
Token containerToken=Token.newInstance(containerTokenId.getBytes(),ContainerTokenIdentifier.KIND.toString(),"password".getBytes(),"tokenservice");
StartContainerRequest containerReq=StartContainerRequest.newInstance(clc,containerToken);
// Store the request; after restart it should recover as REQUESTED.
stateStore.storeContainer(containerId,containerReq);
restartStateStore();
recoveredContainers=stateStore.loadContainersState();
assertEquals(1,recoveredContainers.size());
RecoveredContainerState rcs=recoveredContainers.get(0);
assertEquals(RecoveredContainerStatus.REQUESTED,rcs.getStatus());
assertEquals(ContainerExitStatus.INVALID,rcs.getExitCode());
assertEquals(false,rcs.getKilled());
assertEquals(containerReq,rcs.getStartRequest());
assertTrue(rcs.getDiagnostics().isEmpty());
// Record launch plus diagnostics; should recover as LAUNCHED.
StringBuilder diags=new StringBuilder();
stateStore.storeContainerLaunched(containerId);
diags.append("some diags for container");
stateStore.storeContainerDiagnostics(containerId,diags);
restartStateStore();
recoveredContainers=stateStore.loadContainersState();
assertEquals(1,recoveredContainers.size());
rcs=recoveredContainers.get(0);
assertEquals(RecoveredContainerStatus.LAUNCHED,rcs.getStatus());
assertEquals(ContainerExitStatus.INVALID,rcs.getExitCode());
assertEquals(false,rcs.getKilled());
assertEquals(containerReq,rcs.getStartRequest());
assertEquals(diags.toString(),rcs.getDiagnostics());
// Record a kill; status stays LAUNCHED but the killed flag is set.
diags.append("some more diags for container");
stateStore.storeContainerDiagnostics(containerId,diags);
stateStore.storeContainerKilled(containerId);
restartStateStore();
recoveredContainers=stateStore.loadContainersState();
assertEquals(1,recoveredContainers.size());
rcs=recoveredContainers.get(0);
assertEquals(RecoveredContainerStatus.LAUNCHED,rcs.getStatus());
assertEquals(ContainerExitStatus.INVALID,rcs.getExitCode());
assertTrue(rcs.getKilled());
assertEquals(containerReq,rcs.getStartRequest());
assertEquals(diags.toString(),rcs.getDiagnostics());
// Record completion with exit code 21; should recover as COMPLETED.
diags.append("some final diags");
stateStore.storeContainerDiagnostics(containerId,diags);
stateStore.storeContainerCompleted(containerId,21);
restartStateStore();
recoveredContainers=stateStore.loadContainersState();
assertEquals(1,recoveredContainers.size());
rcs=recoveredContainers.get(0);
assertEquals(RecoveredContainerStatus.COMPLETED,rcs.getStatus());
assertEquals(21,rcs.getExitCode());
assertTrue(rcs.getKilled());
assertEquals(containerReq,rcs.getStartRequest());
assertEquals(diags.toString(),rcs.getDiagnostics());
// Removing the container leaves nothing to recover.
stateStore.removeContainer(containerId);
restartStateStore();
recoveredContainers=stateStore.loadContainersState();
assertTrue(recoveredContainers.isEmpty());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that in-progress localizations — app, public, and private — are
 * persisted by startResourceLocalization and recovered intact after a state
 * store restart.
 */
@Test public void testStartResourceLocalization() throws IOException {
String user="somebody";
ApplicationId appId=ApplicationId.newInstance(1,1);
// Start (but do not finish) an app-level resource localization.
Path appRsrcPath=new Path("hdfs://some/app/resource");
LocalResourcePBImpl rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(appRsrcPath),LocalResourceType.ARCHIVE,LocalResourceVisibility.APPLICATION,123L,456L);
LocalResourceProto appRsrcProto=rsrcPb.getProto();
Path appRsrcLocalPath=new Path("/some/local/dir/for/apprsrc");
stateStore.startResourceLocalization(user,appId,appRsrcProto,appRsrcLocalPath);
restartStateStore();
// The app tracker should recover exactly one in-progress resource; the
// public and private trackers should be empty.
RecoveredLocalizationState state=stateStore.loadLocalizationState();
LocalResourceTrackerState pubts=state.getPublicTrackerState();
assertTrue(pubts.getLocalizedResources().isEmpty());
assertTrue(pubts.getInProgressResources().isEmpty());
Map userResources=state.getUserResources();
assertEquals(1,userResources.size());
RecoveredUserResources rur=userResources.get(user);
LocalResourceTrackerState privts=rur.getPrivateTrackerState();
assertNotNull(privts);
assertTrue(privts.getLocalizedResources().isEmpty());
assertTrue(privts.getInProgressResources().isEmpty());
assertEquals(1,rur.getAppTrackerStates().size());
LocalResourceTrackerState appts=rur.getAppTrackerStates().get(appId);
assertNotNull(appts);
assertTrue(appts.getLocalizedResources().isEmpty());
assertEquals(1,appts.getInProgressResources().size());
assertEquals(appRsrcLocalPath,appts.getInProgressResources().get(appRsrcProto));
// Start two public localizations (null user/app selects the public
// tracker) and one private localization (user set, app null).
Path pubRsrcPath1=new Path("hdfs://some/public/resource1");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath1),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
LocalResourceProto pubRsrcProto1=rsrcPb.getProto();
Path pubRsrcLocalPath1=new Path("/some/local/dir/for/pubrsrc1");
stateStore.startResourceLocalization(null,null,pubRsrcProto1,pubRsrcLocalPath1);
Path pubRsrcPath2=new Path("hdfs://some/public/resource2");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath2),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
LocalResourceProto pubRsrcProto2=rsrcPb.getProto();
Path pubRsrcLocalPath2=new Path("/some/local/dir/for/pubrsrc2");
stateStore.startResourceLocalization(null,null,pubRsrcProto2,pubRsrcLocalPath2);
Path privRsrcPath=new Path("hdfs://some/private/resource");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(privRsrcPath),LocalResourceType.PATTERN,LocalResourceVisibility.PRIVATE,789L,680L,"*pattern*");
LocalResourceProto privRsrcProto=rsrcPb.getProto();
Path privRsrcLocalPath=new Path("/some/local/dir/for/privrsrc");
stateStore.startResourceLocalization(user,null,privRsrcProto,privRsrcLocalPath);
restartStateStore();
// All four in-progress entries should survive recovery in their
// respective trackers, keyed by resource proto.
state=stateStore.loadLocalizationState();
pubts=state.getPublicTrackerState();
assertTrue(pubts.getLocalizedResources().isEmpty());
assertEquals(2,pubts.getInProgressResources().size());
assertEquals(pubRsrcLocalPath1,pubts.getInProgressResources().get(pubRsrcProto1));
assertEquals(pubRsrcLocalPath2,pubts.getInProgressResources().get(pubRsrcProto2));
userResources=state.getUserResources();
assertEquals(1,userResources.size());
rur=userResources.get(user);
privts=rur.getPrivateTrackerState();
assertNotNull(privts);
assertTrue(privts.getLocalizedResources().isEmpty());
assertEquals(1,privts.getInProgressResources().size());
assertEquals(privRsrcLocalPath,privts.getInProgressResources().get(privRsrcProto));
assertEquals(1,rur.getAppTrackerStates().size());
appts=rur.getAppTrackerStates().get(appId);
assertNotNull(appts);
assertTrue(appts.getLocalizedResources().isEmpty());
assertEquals(1,appts.getInProgressResources().size());
assertEquals(appRsrcLocalPath,appts.getInProgressResources().get(appRsrcProto));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that finishResourceLocalization moves resources from the
 * in-progress set to the localized set in the appropriate tracker (app,
 * public, or private), and that the distinction is preserved across a state
 * store restart.
 */
@Test public void testFinishResourceLocalization() throws IOException {
String user="somebody";
ApplicationId appId=ApplicationId.newInstance(1,1);
// Start and finish an app-level resource localization.
Path appRsrcPath=new Path("hdfs://some/app/resource");
LocalResourcePBImpl rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(appRsrcPath),LocalResourceType.ARCHIVE,LocalResourceVisibility.APPLICATION,123L,456L);
LocalResourceProto appRsrcProto=rsrcPb.getProto();
Path appRsrcLocalPath=new Path("/some/local/dir/for/apprsrc");
stateStore.startResourceLocalization(user,appId,appRsrcProto,appRsrcLocalPath);
LocalizedResourceProto appLocalizedProto=LocalizedResourceProto.newBuilder().setResource(appRsrcProto).setLocalPath(appRsrcLocalPath.toString()).setSize(1234567L).build();
stateStore.finishResourceLocalization(user,appId,appLocalizedProto);
restartStateStore();
// The app tracker should recover the resource as localized, not in
// progress; public and private trackers remain empty.
RecoveredLocalizationState state=stateStore.loadLocalizationState();
LocalResourceTrackerState pubts=state.getPublicTrackerState();
assertTrue(pubts.getLocalizedResources().isEmpty());
assertTrue(pubts.getInProgressResources().isEmpty());
Map userResources=state.getUserResources();
assertEquals(1,userResources.size());
RecoveredUserResources rur=userResources.get(user);
LocalResourceTrackerState privts=rur.getPrivateTrackerState();
assertNotNull(privts);
assertTrue(privts.getLocalizedResources().isEmpty());
assertTrue(privts.getInProgressResources().isEmpty());
assertEquals(1,rur.getAppTrackerStates().size());
LocalResourceTrackerState appts=rur.getAppTrackerStates().get(appId);
assertNotNull(appts);
assertTrue(appts.getInProgressResources().isEmpty());
assertEquals(1,appts.getLocalizedResources().size());
assertEquals(appLocalizedProto,appts.getLocalizedResources().iterator().next());
// Start two public localizations (null user/app = public tracker) and
// one private localization, then finish only pub1 and the private one,
// leaving pub2 in progress.
Path pubRsrcPath1=new Path("hdfs://some/public/resource1");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath1),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
LocalResourceProto pubRsrcProto1=rsrcPb.getProto();
Path pubRsrcLocalPath1=new Path("/some/local/dir/for/pubrsrc1");
stateStore.startResourceLocalization(null,null,pubRsrcProto1,pubRsrcLocalPath1);
Path pubRsrcPath2=new Path("hdfs://some/public/resource2");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath2),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
LocalResourceProto pubRsrcProto2=rsrcPb.getProto();
Path pubRsrcLocalPath2=new Path("/some/local/dir/for/pubrsrc2");
stateStore.startResourceLocalization(null,null,pubRsrcProto2,pubRsrcLocalPath2);
Path privRsrcPath=new Path("hdfs://some/private/resource");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(privRsrcPath),LocalResourceType.PATTERN,LocalResourceVisibility.PRIVATE,789L,680L,"*pattern*");
LocalResourceProto privRsrcProto=rsrcPb.getProto();
Path privRsrcLocalPath=new Path("/some/local/dir/for/privrsrc");
stateStore.startResourceLocalization(user,null,privRsrcProto,privRsrcLocalPath);
LocalizedResourceProto pubLocalizedProto1=LocalizedResourceProto.newBuilder().setResource(pubRsrcProto1).setLocalPath(pubRsrcLocalPath1.toString()).setSize(pubRsrcProto1.getSize()).build();
stateStore.finishResourceLocalization(null,null,pubLocalizedProto1);
LocalizedResourceProto privLocalizedProto=LocalizedResourceProto.newBuilder().setResource(privRsrcProto).setLocalPath(privRsrcLocalPath.toString()).setSize(privRsrcProto.getSize()).build();
stateStore.finishResourceLocalization(user,null,privLocalizedProto);
restartStateStore();
// Recovery: pub1 localized, pub2 still in progress, private localized,
// and the earlier app resource still localized.
state=stateStore.loadLocalizationState();
pubts=state.getPublicTrackerState();
assertEquals(1,pubts.getLocalizedResources().size());
assertEquals(pubLocalizedProto1,pubts.getLocalizedResources().iterator().next());
assertEquals(1,pubts.getInProgressResources().size());
assertEquals(pubRsrcLocalPath2,pubts.getInProgressResources().get(pubRsrcProto2));
userResources=state.getUserResources();
assertEquals(1,userResources.size());
rur=userResources.get(user);
privts=rur.getPrivateTrackerState();
assertNotNull(privts);
assertEquals(1,privts.getLocalizedResources().size());
assertEquals(privLocalizedProto,privts.getLocalizedResources().iterator().next());
assertTrue(privts.getInProgressResources().isEmpty());
assertEquals(1,rur.getAppTrackerStates().size());
appts=rur.getAppTrackerStates().get(appId);
assertNotNull(appts);
assertTrue(appts.getInProgressResources().isEmpty());
assertEquals(1,appts.getLocalizedResources().size());
assertEquals(appLocalizedProto,appts.getLocalizedResources().iterator().next());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies NMContainerTokenSecretManager recovery from the state store:
 * tokens stay usable after a restart, a token already used to start a
 * container is rejected for reuse, and after the master key is rolled twice
 * the old tokens' passwords can no longer be retrieved.
 */
@Test public void testRecovery() throws IOException {
YarnConfiguration conf=new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true);
final NodeId nodeId=NodeId.newInstance("somehost",1234);
final ContainerId cid1=BuilderUtils.newContainerId(1,1,1,1);
final ContainerId cid2=BuilderUtils.newContainerId(2,2,2,2);
ContainerTokenKeyGeneratorForTest keygen=new ContainerTokenKeyGeneratorForTest(conf);
NMMemoryStateStoreService stateStore=new NMMemoryStateStoreService();
stateStore.init(conf);
stateStore.start();
// Create two tokens under the initial master key.
NMContainerTokenSecretManager secretMgr=new NMContainerTokenSecretManager(conf,stateStore);
secretMgr.setNodeId(nodeId);
MasterKey currentKey=keygen.generateKey();
secretMgr.setMasterKey(currentKey);
ContainerTokenIdentifier tokenId1=createContainerTokenId(cid1,nodeId,"user1",secretMgr);
ContainerTokenIdentifier tokenId2=createContainerTokenId(cid2,nodeId,"user2",secretMgr);
assertNotNull(secretMgr.retrievePassword(tokenId1));
assertNotNull(secretMgr.retrievePassword(tokenId2));
// Simulate a restart: a new manager recovered from the same store must
// still accept both tokens.
secretMgr=new NMContainerTokenSecretManager(conf,stateStore);
secretMgr.setNodeId(nodeId);
secretMgr.recover();
assertEquals(currentKey,secretMgr.getCurrentKey());
assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
assertTrue(secretMgr.isValidStartContainerRequest(tokenId2));
assertNotNull(secretMgr.retrievePassword(tokenId1));
assertNotNull(secretMgr.retrievePassword(tokenId2));
// Use token2 to start a container and roll the master key once.
secretMgr.startContainerSuccessful(tokenId2);
currentKey=keygen.generateKey();
secretMgr.setMasterKey(currentKey);
// After another restart: token2 may no longer start a container, but both
// passwords are still retrievable (old key retained for one roll).
secretMgr=new NMContainerTokenSecretManager(conf,stateStore);
secretMgr.setNodeId(nodeId);
secretMgr.recover();
assertEquals(currentKey,secretMgr.getCurrentKey());
assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
assertFalse(secretMgr.isValidStartContainerRequest(tokenId2));
assertNotNull(secretMgr.retrievePassword(tokenId1));
assertNotNull(secretMgr.retrievePassword(tokenId2));
// Roll the key a second time; after restart the original key is gone and
// both tokens' passwords should be rejected.
currentKey=keygen.generateKey();
secretMgr.setMasterKey(currentKey);
secretMgr=new NMContainerTokenSecretManager(conf,stateStore);
secretMgr.setNodeId(nodeId);
secretMgr.recover();
assertEquals(currentKey,secretMgr.getCurrentKey());
assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
assertFalse(secretMgr.isValidStartContainerRequest(tokenId2));
try {
secretMgr.retrievePassword(tokenId1);
fail("token should not be valid");
}
catch ( InvalidToken e) {
// expected: key for this token has been rolled off
}
try {
secretMgr.retrievePassword(tokenId2);
fail("token should not be valid");
}
catch ( InvalidToken e) {
// expected: key for this token has been rolled off
}
stateStore.close();
}
APIUtilityVerifier EqualityVerifier
/**
 * Verifies that ProcessIdFileReader.getProcessId skips blank lines and
 * non-pid junk and returns the first valid process id in the file.
 */
@Test(timeout=30000) public void testComplexGet() throws IOException {
String rootDir=new File(System.getProperty("test.build.data","/tmp")).getAbsolutePath();
File testFile=null;
// On Windows the "pid" is a container id string; elsewhere a numeric pid.
String processIdInFile=Shell.WINDOWS ? " container_1353742680940_0002_01_000001 " : " 23 ";
String expectedProcessId=processIdInFile.trim();
try {
testFile=new File(rootDir,"temp.txt");
// Close the writer in finally so the handle is not leaked if a write fails.
PrintWriter fileWriter=new PrintWriter(testFile);
try {
// Junk lines that the reader must skip before the real pid.
fileWriter.println(" ");
fileWriter.println("");
fileWriter.println("abc");
fileWriter.println("-123");
fileWriter.println("-123 ");
fileWriter.println(processIdInFile);
fileWriter.println("6236");
}
finally {
fileWriter.close();
}
// (Dropped the redundant null-initialization before assignment.)
String processId=ProcessIdFileReader.getProcessId(new Path(rootDir + Path.SEPARATOR + "temp.txt"));
Assert.assertEquals(expectedProcessId,processId);
}
finally {
if (testFile != null && testFile.exists()) {
testFile.delete();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
// Verifies that log dirs resolved for a container are plain local paths with
// no "file:" scheme — both while the container is RUNNING in the NM context
// and again after it has been removed from the context.
@Test(timeout=30000) public void testContainerLogDirs() throws IOException, YarnException {
File absLogDir=new File("target",TestNMWebServer.class.getSimpleName() + "LogDir").getAbsoluteFile();
// The NM log dir is deliberately configured as a URI ("file:///...") to
// exercise scheme stripping in ContainerLogsUtils.
String logdirwithFile=absLogDir.toURI().toString();
Configuration conf=new Configuration();
conf.set(YarnConfiguration.NM_LOG_DIRS,logdirwithFile);
NodeHealthCheckerService healthChecker=new NodeHealthCheckerService();
healthChecker.init(conf);
LocalDirsHandlerService dirsHandler=healthChecker.getDiskHandler();
NMContext nmContext=new NodeManager.NMContext(null,null,dirsHandler,new ApplicationACLsManager(conf),new NMNullStateStoreService());
RecordFactory recordFactory=RecordFactoryProvider.getRecordFactory(conf);
String user="nobody";
long clusterTimeStamp=1234;
ApplicationId appId=BuilderUtils.newApplicationId(recordFactory,clusterTimeStamp,1);
// Mocked application registered in the NM context so the user/ownership
// lookups made during log-dir resolution succeed.
Application app=mock(Application.class);
when(app.getUser()).thenReturn(user);
when(app.getAppId()).thenReturn(appId);
ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(appId,1);
ContainerId container1=BuilderUtils.newContainerId(recordFactory,appId,appAttemptId,0);
nmContext.getApplications().put(appId,app);
MockContainer container=new MockContainer(appAttemptId,new AsyncDispatcher(),conf,user,appId,1);
container.setState(ContainerState.RUNNING);
nmContext.getContainers().put(container1,container);
// NOTE(review): raw List — presumably List<File> given the toString() check
// below; confirm against ContainerLogsUtils.getContainerLogDirs.
List files=null;
files=ContainerLogsUtils.getContainerLogDirs(container1,user,nmContext);
Assert.assertTrue(!(files.get(0).toString().contains("file:")));
// Remove the container from the context and resolve again: the dirs must
// still come back as plain paths.
nmContext.getContainers().remove(container1);
Assert.assertNull(nmContext.getContainers().get(container1));
files=ContainerLogsUtils.getContainerLogDirs(container1,user,nmContext);
Assert.assertTrue(!(files.get(0).toString().contains("file:")));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Fetches the node-info resource as XML and verifies the payload contains
 * exactly one {@code nodeInfo} element, then delegates field checks to
 * {@code verifyNodesXML}.
 */
@Test public void testSingleNodesXML() throws JSONException, Exception {
  WebResource webResource=resource();
  ClientResponse resp=webResource.path("ws").path("v1").path("node").path("info/")
      .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE,resp.getType());
  String body=resp.getEntity(String.class);
  // Parse the XML payload and count nodeInfo elements.
  Document parsed=DocumentBuilderFactory.newInstance().newDocumentBuilder()
      .parse(new InputSource(new StringReader(body)));
  NodeList infoNodes=parsed.getElementsByTagName("nodeInfo");
  assertEquals("incorrect number of elements",1,infoNodes.getLength());
  verifyNodesXML(infoNodes);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises the containerlogs REST endpoint: an existing log file is served
 * verbatim, an unknown file name yields 404 with an explanatory message, and
 * the file stays retrievable after the container is removed from the NM
 * context (the log is still on local disk).
 */
@Test public void testContainerLogs() throws IOException {
  WebResource r=resource();
  final ContainerId containerId=BuilderUtils.newContainerId(0,0,0,0);
  final String containerIdStr=BuilderUtils.newContainerId(0,0,0,0).toString();
  final ApplicationAttemptId appAttemptId=containerId.getApplicationAttemptId();
  final ApplicationId appId=appAttemptId.getApplicationId();
  final String appIdStr=appId.toString();
  final String filename="logfile1";
  final String logMessage="log message\n";
  nmContext.getApplications().put(appId,new ApplicationImpl(null,"user",appId,null,nmContext));
  MockContainer container=new MockContainer(appAttemptId,new AsyncDispatcher(),new Configuration(),"user",appId,1);
  container.setState(ContainerState.RUNNING);
  nmContext.getContainers().put(containerId,container);
  // Write the log file where the NM expects it for this app/container.
  Path path=dirsHandler.getLogPathForWrite(ContainerLaunch.getRelativeContainerLogDir(appIdStr,containerIdStr) + "/" + filename,false);
  File logFile=new File(path.toUri().getPath());
  logFile.deleteOnExit();
  assertTrue("Failed to create log dir",logFile.getParentFile().mkdirs());
  // try-with-resources: the writer is closed even if print throws (the
  // original leaked the stream on an exception before close()).
  try (PrintWriter pw=new PrintWriter(logFile)) {
    pw.print(logMessage);
  }
  ClientResponse response=r.path("ws").path("v1").path("node").path("containerlogs").path(containerIdStr).path(filename).accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
  String responseText=response.getEntity(String.class);
  assertEquals(logMessage,responseText);
  // Unknown file name -> 404 plus a "not found" message in the body.
  response=r.path("ws").path("v1").path("node").path("containerlogs").path(containerIdStr).path("uhhh").accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
  Assert.assertEquals(Status.NOT_FOUND.getStatusCode(),response.getStatus());
  responseText=response.getEntity(String.class);
  assertTrue(responseText.contains("Cannot find this log on the local disk."));
  // After the container vanishes from the context, the log is still served
  // straight from disk.
  nmContext.getContainers().remove(containerId);
  Assert.assertNull(nmContext.getContainers().get(containerId));
  response=r.path("ws").path("v1").path("node").path("containerlogs").path(containerIdStr).path(filename).accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
  responseText=response.getEntity(String.class);
  assertEquals(logMessage,responseText);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Registers two applications (each with containers) and checks that the
 * node's apps listing XML exposes exactly two {@code app} elements.
 */
@Test public void testNodeAppsXML() throws JSONException, Exception {
  WebResource webResource=resource();
  Application firstApp=new MockApp(1);
  nmContext.getApplications().put(firstApp.getAppId(),firstApp);
  addAppContainers(firstApp);
  Application secondApp=new MockApp(2);
  nmContext.getApplications().put(secondApp.getAppId(),secondApp);
  addAppContainers(secondApp);
  ClientResponse resp=webResource.path("ws").path("v1").path("node").path("apps")
      .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE,resp.getType());
  String body=resp.getEntity(String.class);
  // Parse and count the <app> elements in the listing.
  Document parsed=DocumentBuilderFactory.newInstance().newDocumentBuilder()
      .parse(new InputSource(new StringReader(body)));
  NodeList appNodes=parsed.getElementsByTagName("app");
  assertEquals("incorrect number of elements",2,appNodes.getLength());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Registers two applications, fetches only the first one by id, and verifies
 * the XML holds exactly one {@code app} element describing it.
 */
@Test public void testNodeSingleAppsXML() throws JSONException, Exception {
  WebResource webResource=resource();
  Application firstApp=new MockApp(1);
  nmContext.getApplications().put(firstApp.getAppId(),firstApp);
  HashMap containerMap=addAppContainers(firstApp);
  Application secondApp=new MockApp(2);
  nmContext.getApplications().put(secondApp.getAppId(),secondApp);
  addAppContainers(secondApp);
  ClientResponse resp=webResource.path("ws").path("v1").path("node").path("apps")
      .path(firstApp.getAppId().toString() + "/")
      .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE,resp.getType());
  String body=resp.getEntity(String.class);
  // Parse the single-app payload and check it holds exactly one <app>.
  Document parsed=DocumentBuilderFactory.newInstance().newDocumentBuilder()
      .parse(new InputSource(new StringReader(body)));
  NodeList appNodes=parsed.getElementsByTagName("app");
  assertEquals("incorrect number of elements",1,appNodes.getLength());
  verifyNodeAppInfoXML(appNodes,firstApp,containerMap);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Registers two apps with containers and fetches each of app 1's containers
 * individually via /node/containers/{id}; each response must contain exactly
 * one {@code container} element matching the live container in the NM context.
 */
@Test public void testNodeSingleContainerXML() throws JSONException, Exception {
  WebResource r=resource();
  Application app=new MockApp(1);
  nmContext.getApplications().put(app.getAppId(),app);
  // Typed map restored: the enhanced-for below with a String loop variable
  // does not compile over a raw HashMap's keySet().
  HashMap<String,String> hash=addAppContainers(app);
  Application app2=new MockApp(2);
  nmContext.getApplications().put(app2.getAppId(),app2);
  addAppContainers(app2);
  for ( String id : hash.keySet()) {
    ClientResponse response=r.path("ws").path("v1").path("node").path("containers").path(id).accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
    String xml=response.getEntity(String.class);
    // Parse the per-container XML payload.
    DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance();
    DocumentBuilder db=dbf.newDocumentBuilder();
    InputSource is=new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom=db.parse(is);
    NodeList nodes=dom.getElementsByTagName("container");
    assertEquals("incorrect number of elements",1,nodes.getLength());
    verifyContainersInfoXML(nodes,nmContext.getContainers().get(ConverterUtils.toContainerId(id)));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Registers two applications with containers each and checks that the
 * containers listing XML exposes four {@code container} elements in total.
 */
@Test public void testNodeContainerXML() throws JSONException, Exception {
  WebResource webResource=resource();
  Application firstApp=new MockApp(1);
  nmContext.getApplications().put(firstApp.getAppId(),firstApp);
  addAppContainers(firstApp);
  Application secondApp=new MockApp(2);
  nmContext.getApplications().put(secondApp.getAppId(),secondApp);
  addAppContainers(secondApp);
  ClientResponse resp=webResource.path("ws").path("v1").path("node").path("containers")
      .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE,resp.getType());
  String body=resp.getEntity(String.class);
  // Parse and count the <container> elements across both apps.
  Document parsed=DocumentBuilderFactory.newInstance().newDocumentBuilder()
      .parse(new InputSource(new StringReader(body)));
  NodeList containerNodes=parsed.getElementsByTagName("container");
  assertEquals("incorrect number of elements",4,containerNodes.getLength());
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testAuthorizedAccess() throws Exception {
MyContainerManager containerManager=new MyContainerManager();
rm=new MockRMWithAMS(conf,containerManager);
rm.start();
MockNM nm1=rm.registerNode("localhost:1234",5120);
Map acls=new HashMap(2);
acls.put(ApplicationAccessType.VIEW_APP,"*");
RMApp app=rm.submitApp(1024,"appname","appuser",acls);
nm1.nodeHeartbeat(true);
int waitCount=0;
while (containerManager.containerTokens == null && waitCount++ < 20) {
LOG.info("Waiting for AM Launch to happen..");
Thread.sleep(1000);
}
Assert.assertNotNull(containerManager.containerTokens);
RMAppAttempt attempt=app.getCurrentAppAttempt();
ApplicationAttemptId applicationAttemptId=attempt.getAppAttemptId();
waitForLaunchedState(attempt);
final Configuration conf=rm.getConfig();
final YarnRPC rpc=YarnRPC.create(conf);
UserGroupInformation currentUser=UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
Credentials credentials=containerManager.getContainerCredentials();
final InetSocketAddress rmBindAddress=rm.getApplicationMasterService().getBindAddress();
Token extends TokenIdentifier> amRMToken=MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress,credentials.getAllTokens());
currentUser.addToken(amRMToken);
ApplicationMasterProtocol client=currentUser.doAs(new PrivilegedAction(){
@Override public ApplicationMasterProtocol run(){
return (ApplicationMasterProtocol)rpc.getProxy(ApplicationMasterProtocol.class,rm.getApplicationMasterService().getBindAddress(),conf);
}
}
);
RegisterApplicationMasterRequest request=Records.newRecord(RegisterApplicationMasterRequest.class);
RegisterApplicationMasterResponse response=client.registerApplicationMaster(request);
Assert.assertNotNull(response.getClientToAMTokenMasterKey());
if (UserGroupInformation.isSecurityEnabled()) {
Assert.assertTrue(response.getClientToAMTokenMasterKey().array().length > 0);
}
Assert.assertEquals("Register response has bad ACLs","*",response.getApplicationACLs().get(ApplicationAccessType.VIEW_APP));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Re-submitting with an already-registered application id must be rejected
 * with a "duplicate" YarnException, and the pre-existing (FINISHED) app must
 * remain registered untouched.
 */
@Test(timeout=30000) public void testRMAppSubmitDuplicateApplicationId() throws Exception {
  ApplicationId appId=MockApps.newAppID(0);
  asContext.setApplicationId(appId);
  RMApp appOrig=rmContext.getRMApps().get(appId);
  // Compare content, not references: the original '!=' on Strings compared
  // identity and was vacuously true.
  Assert.assertTrue("app name matches but shouldn't",!"testApp1".equals(appOrig.getName()));
  try {
    appMonitor.submitApplication(asContext,"test");
    Assert.fail("Exception is expected when applicationId is duplicate.");
  }
  catch ( YarnException e) {
    Assert.assertTrue("The thrown exception is not the expectd one.",e.getMessage().contains("Cannot add a duplicate!"));
  }
  // The pre-existing app must be unchanged by the failed submission.
  RMApp app=rmContext.getRMApps().get(appId);
  Assert.assertNotNull("app is null",app);
  Assert.assertEquals("app id doesn't match",appId,app.getApplicationId());
  Assert.assertEquals("app state doesn't match",RMAppState.FINISHED,app.getState());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Sweeps the global RM_AM_MAX_ATTEMPTS setting against per-application
 * overrides and verifies the effective max-attempts the RM records: values in
 * (0, global] are honored, 0/negative mean "unset" (global wins), and values
 * above the global are capped at the global.
 */
@Test(timeout=30000) public void testRMAppSubmitMaxAppAttempts() throws Exception {
  int[] globalMaxAppAttempts=new int[]{10,1};
  int[][] individualMaxAppAttempts=new int[][]{new int[]{9,10,11,0},new int[]{1,10,0,-1}};
  // expectedNums[i][j] = effective attempts for global i / individual j.
  int[][] expectedNums=new int[][]{new int[]{9,10,10,10},new int[]{1,1,1,1}};
  for (int i=0; i < globalMaxAppAttempts.length; ++i) {
    // BUGFIX: iterate the inner row (4 cases), not the outer array (2 rows);
    // the original bound silently skipped the last two cases of every row.
    for (int j=0; j < individualMaxAppAttempts[i].length; ++j) {
      ResourceScheduler scheduler=mockResourceScheduler();
      Configuration conf=new Configuration();
      conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,globalMaxAppAttempts[i]);
      ApplicationMasterService masterService=new ApplicationMasterService(rmContext,scheduler);
      TestRMAppManager appMonitor=new TestRMAppManager(rmContext,new ClientToAMTokenSecretManagerInRM(),scheduler,masterService,new ApplicationACLsManager(conf),conf);
      ApplicationId appID=MockApps.newAppID(i * 4 + j + 1);
      asContext.setApplicationId(appID);
      // 0 means "not set": let the global default apply.
      if (individualMaxAppAttempts[i][j] != 0) {
        asContext.setMaxAppAttempts(individualMaxAppAttempts[i][j]);
      }
      appMonitor.submitApplication(asContext,"test");
      RMApp app=rmContext.getRMApps().get(appID);
      Assert.assertEquals("max application attempts doesn't match",expectedNums[i][j],app.getMaxAppAttempts());
      // Wait for the submission event to replace the KILL sentinel, then
      // reset the sentinel for the next iteration.
      int timeoutSecs=0;
      while ((getAppEventType() == RMAppEventType.KILL) && timeoutSecs++ < 20) {
        Thread.sleep(1000);
      }
      setAppEventType(RMAppEventType.KILL);
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Submits an application and checks it lands in the RM context in NEW state,
 * then waits for the START event to be delivered.
 */
@Test public void testRMAppSubmit() throws Exception {
  appMonitor.submitApplication(asContext,"test");
  RMApp submitted=rmContext.getRMApps().get(appId);
  Assert.assertNotNull("app is null",submitted);
  Assert.assertEquals("app id doesn't match",appId,submitted.getApplicationId());
  Assert.assertEquals("app state doesn't match",RMAppState.NEW,submitted.getState());
  // Poll (max ~20s) until the KILL sentinel is replaced by the real event.
  for (int waited=0; getAppEventType() == RMAppEventType.KILL && waited < 20; waited++) {
    Thread.sleep(1000);
  }
  Assert.assertEquals("app event type sent is wrong",RMAppEventType.START,getAppEventType());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@SuppressWarnings("resource") @Test public void testContainerCleanup() throws Exception {
Logger rootLogger=LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
final DrainDispatcher dispatcher=new DrainDispatcher();
MockRM rm=new MockRM(){
@Override protected EventHandler createSchedulerEventDispatcher(){
return new SchedulerEventDispatcher(this.scheduler){
@Override public void handle( SchedulerEvent event){
scheduler.handle(event);
}
}
;
}
@Override protected Dispatcher createDispatcher(){
return dispatcher;
}
}
;
rm.start();
MockNM nm1=rm.registerNode("127.0.0.1:1234",5000);
RMApp app=rm.submitApp(2000);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt=app.getCurrentAppAttempt();
MockAM am=rm.sendAMLaunched(attempt.getAppAttemptId());
am.registerAppAttempt();
int request=2;
am.allocate("127.0.0.1",1000,request,new ArrayList());
dispatcher.await();
nm1.nodeHeartbeat(true);
List conts=am.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
int contReceived=conts.size();
int waitCount=0;
while (contReceived < request && waitCount++ < 200) {
LOG.info("Got " + contReceived + " containers. Waiting to get "+ request);
Thread.sleep(100);
conts=am.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
dispatcher.await();
contReceived+=conts.size();
nm1.nodeHeartbeat(true);
}
Assert.assertEquals(request,contReceived);
ArrayList release=new ArrayList();
release.add(conts.get(0).getId());
am.allocate(new ArrayList(),release);
dispatcher.await();
Map> containerStatuses=new HashMap>();
ArrayList containerStatusList=new ArrayList();
containerStatusList.add(BuilderUtils.newContainerStatus(conts.get(0).getId(),ContainerState.RUNNING,"nothing",0));
containerStatuses.put(app.getApplicationId(),containerStatusList);
NodeHeartbeatResponse resp=nm1.nodeHeartbeat(containerStatuses,true);
waitForContainerCleanup(dispatcher,nm1,resp);
LOG.info("Testing container launch much after release and " + "NM getting cleanup");
containerStatuses.clear();
containerStatusList.clear();
containerStatusList.add(BuilderUtils.newContainerStatus(conts.get(0).getId(),ContainerState.RUNNING,"nothing",0));
containerStatuses.put(app.getApplicationId(),containerStatusList);
resp=nm1.nodeHeartbeat(containerStatuses,true);
waitForContainerCleanup(dispatcher,nm1,resp);
rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Runs an application to completion and verifies the RM's cleanup orders to
 * the NM: exactly one application and both of its containers are flagged for
 * cleanup across subsequent heartbeats.
 */
@SuppressWarnings("resource") @Test public void testAppCleanup() throws Exception {
  Logger rootLogger=LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  MockRM rm=new MockRM();
  rm.start();
  MockNM nm1=rm.registerNode("127.0.0.1:1234",5000);
  RMApp app=rm.submitApp(2000);
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt=app.getCurrentAppAttempt();
  MockAM am=rm.sendAMLaunched(attempt.getAppAttemptId());
  am.registerAppAttempt();
  int request=2;
  am.allocate("127.0.0.1",1000,request,new ArrayList<ContainerId>());
  nm1.nodeHeartbeat(true);
  // Heartbeat-and-poll (max ~20s) until both requested containers arrive.
  // (Raw collection types replaced with the generics the heartbeat API uses.)
  List<Container> conts=am.allocate(new ArrayList<ResourceRequest>(),new ArrayList<ContainerId>()).getAllocatedContainers();
  int contReceived=conts.size();
  int waitCount=0;
  while (contReceived < request && waitCount++ < 200) {
    LOG.info("Got " + contReceived + " containers. Waiting to get "+ request);
    Thread.sleep(100);
    conts=am.allocate(new ArrayList<ResourceRequest>(),new ArrayList<ContainerId>()).getAllocatedContainers();
    contReceived+=conts.size();
    nm1.nodeHeartbeat(true);
  }
  Assert.assertEquals(request,contReceived);
  // Finish the application and wait for the attempt to reach FINISHED.
  am.unregisterAppAttempt();
  NodeHeartbeatResponse resp=nm1.nodeHeartbeat(attempt.getAppAttemptId(),1,ContainerState.COMPLETE);
  am.waitForState(RMAppAttemptState.FINISHED);
  // Accumulate cleanup orders across heartbeats until we have seen two
  // containers and one application (max ~20s).
  resp=nm1.nodeHeartbeat(true);
  List<ContainerId> containersToCleanup=resp.getContainersToCleanup();
  List<ApplicationId> appsToCleanup=resp.getApplicationsToCleanup();
  int numCleanedContainers=containersToCleanup.size();
  int numCleanedApps=appsToCleanup.size();
  waitCount=0;
  while ((numCleanedContainers < 2 || numCleanedApps < 1) && waitCount++ < 200) {
    LOG.info("Waiting to get cleanup events.. cleanedConts: " + numCleanedContainers + " cleanedApps: "+ numCleanedApps);
    Thread.sleep(100);
    resp=nm1.nodeHeartbeat(true);
    List<ContainerId> deltaContainersToCleanup=resp.getContainersToCleanup();
    List<ApplicationId> deltaAppsToCleanup=resp.getApplicationsToCleanup();
    containersToCleanup.addAll(deltaContainersToCleanup);
    appsToCleanup.addAll(deltaAppsToCleanup);
    numCleanedContainers=containersToCleanup.size();
    numCleanedApps=appsToCleanup.size();
  }
  Assert.assertEquals(1,appsToCleanup.size());
  Assert.assertEquals(app.getApplicationId(),appsToCleanup.get(0));
  Assert.assertEquals(1,numCleanedApps);
  Assert.assertEquals(2,numCleanedContainers);
  rm.stop();
}
APIUtilityVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Unregistering an AM that never registered must fail with
 * ApplicationMasterNotRegisteredException; after a proper register the same
 * unregister call must succeed.
 */
@Test(timeout=1200000) public void testFinishApplicationMasterBeforeRegistering() throws Exception {
  MockRM resourceManager=new MockRM(conf);
  try {
    resourceManager.start();
    MockNM node=resourceManager.registerNode("127.0.0.1:1234",6 * GB);
    RMApp application=resourceManager.submitApp(2048);
    MockAM master=MockRM.launchAM(application,resourceManager,node);
    FinishApplicationMasterRequest finishReq=FinishApplicationMasterRequest.newInstance(FinalApplicationStatus.FAILED,"","");
    // Attempt the premature unregister and capture the underlying cause.
    Throwable failure=null;
    try {
      master.unregisterAppAttempt(finishReq,false);
    }
    catch ( Exception e) {
      failure=e.getCause();
    }
    Assert.assertNotNull(failure);
    Assert.assertTrue(failure instanceof ApplicationMasterNotRegisteredException);
    Assert.assertNotNull(failure.getMessage());
    Assert.assertTrue(failure.getMessage().contains("Application Master is trying to unregister before registering for:"));
    // Once registered, the identical unregister request must go through.
    master.registerAppAttempt();
    master.unregisterAppAttempt(finishReq,false);
  }
  finally {
    if (resourceManager != null) {
      resourceManager.stop();
    }
  }
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
/**
 * Killing an application the RM does not know about must fail with
 * ApplicationNotFoundException carrying the offending application id.
 */
@Test public void testForceKillNonExistingApplication() throws YarnException {
  RMContext rmContext=mock(RMContext.class);
  // Empty application map: any id is "absent".
  when(rmContext.getRMApps()).thenReturn(new ConcurrentHashMap<ApplicationId,RMApp>());
  ClientRMService rmService=new ClientRMService(rmContext,null,null,null,null,null);
  ApplicationId applicationId=BuilderUtils.newApplicationId(System.currentTimeMillis(),0);
  KillApplicationRequest request=KillApplicationRequest.newInstance(applicationId);
  try {
    rmService.forceKillApplication(request);
    Assert.fail();
  }
  catch ( ApplicationNotFoundException ex) {
    // assertEquals takes (expected, actual); the original had the arguments
    // reversed, which produces a misleading failure message.
    Assert.assertEquals("Trying to kill an absent " + "application " + request.getApplicationId(),ex.getMessage());
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Requests the report of a known container id and checks the same id is
 * echoed back; an ApplicationNotFoundException fails the test.
 */
@Test public void testGetContainerReport() throws YarnException, IOException {
  ClientRMService clientService=createRMService();
  RecordFactory factory=RecordFactoryProvider.getRecordFactory(null);
  GetContainerReportRequest reportRequest=factory.newRecordInstance(GetContainerReportRequest.class);
  ApplicationAttemptId attempt=ApplicationAttemptId.newInstance(ApplicationId.newInstance(123456,1),1);
  ContainerId wantedContainer=ContainerId.newInstance(attempt,1);
  reportRequest.setContainerId(wantedContainer);
  try {
    GetContainerReportResponse reportResponse=clientService.getContainerReport(reportRequest);
    Assert.assertEquals(wantedContainer,reportResponse.getContainerReport().getContainerId());
  }
  catch ( ApplicationNotFoundException ex) {
    Assert.fail(ex.getMessage());
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Drives ClientRMService.submitApplication through three cases: defaults
// (name/queue fall back to YARN defaults), explicit name/queue with an
// application type, and a duplicate submission (tolerated here). Finally
// checks getApplications with and without an applicationType filter.
@Test(timeout=30000) @SuppressWarnings("rawtypes") public void testAppSubmit() throws Exception {
YarnScheduler yarnScheduler=mockYarnScheduler();
RMContext rmContext=mock(RMContext.class);
mockRMContext(yarnScheduler,rmContext);
RMStateStore stateStore=mock(RMStateStore.class);
when(rmContext.getStateStore()).thenReturn(stateStore);
RMAppManager appManager=new RMAppManager(rmContext,yarnScheduler,null,mock(ApplicationACLsManager.class),new Configuration());
// Swallow all dispatched events — this test only inspects the app map.
when(rmContext.getDispatcher().getEventHandler()).thenReturn(new EventHandler(){
public void handle( Event event){
}
}
);
ApplicationId appId1=getApplicationId(100);
// ACL managers stubbed to always grant access.
ApplicationACLsManager mockAclsManager=mock(ApplicationACLsManager.class);
when(mockAclsManager.checkAccess(UserGroupInformation.getCurrentUser(),ApplicationAccessType.VIEW_APP,null,appId1)).thenReturn(true);
QueueACLsManager mockQueueACLsManager=mock(QueueACLsManager.class);
when(mockQueueACLsManager.checkAccess(any(UserGroupInformation.class),any(QueueACL.class),anyString())).thenReturn(true);
ClientRMService rmService=new ClientRMService(rmContext,yarnScheduler,appManager,mockAclsManager,mockQueueACLsManager,null);
// Case 1: submit with null name/queue — defaults must be applied.
SubmitApplicationRequest submitRequest1=mockSubmitAppRequest(appId1,null,null);
try {
rmService.submitApplication(submitRequest1);
}
catch ( YarnException e) {
Assert.fail("Exception is not expected.");
}
RMApp app1=rmContext.getRMApps().get(appId1);
Assert.assertNotNull("app doesn't exist",app1);
Assert.assertEquals("app name doesn't match",YarnConfiguration.DEFAULT_APPLICATION_NAME,app1.getName());
Assert.assertEquals("app queue doesn't match",YarnConfiguration.DEFAULT_QUEUE_NAME,app1.getQueue());
// Case 2: explicit name/queue and an application type to filter on later.
String name=MockApps.newAppName();
String queue=MockApps.newQueue();
ApplicationId appId2=getApplicationId(101);
SubmitApplicationRequest submitRequest2=mockSubmitAppRequest(appId2,name,queue);
submitRequest2.getApplicationSubmissionContext().setApplicationType("matchType");
try {
rmService.submitApplication(submitRequest2);
}
catch ( YarnException e) {
Assert.fail("Exception is not expected.");
}
RMApp app2=rmContext.getRMApps().get(appId2);
Assert.assertNotNull("app doesn't exist",app2);
Assert.assertEquals("app name doesn't match",name,app2.getName());
Assert.assertEquals("app queue doesn't match",queue,app2.getQueue());
// Case 3: re-submitting the same request must not throw here.
try {
rmService.submitApplication(submitRequest2);
}
catch ( YarnException e) {
Assert.fail("Exception is not expected.");
}
// Unfiltered listing: 5 apps total (mockRMContext pre-populates some).
GetApplicationsRequest getAllAppsRequest=GetApplicationsRequest.newInstance(new HashSet());
GetApplicationsResponse getAllApplicationsResponse=rmService.getApplications(getAllAppsRequest);
Assert.assertEquals(5,getAllApplicationsResponse.getApplicationList().size());
// Filter by application type: only the "matchType" app should remain.
Set appTypes=new HashSet();
appTypes.add("matchType");
getAllAppsRequest=GetApplicationsRequest.newInstance(appTypes);
getAllApplicationsResponse=rmService.getApplications(getAllAppsRequest);
Assert.assertEquals(1,getAllApplicationsResponse.getApplicationList().size());
Assert.assertEquals(appId2,getAllApplicationsResponse.getApplicationList().get(0).getApplicationId());
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier
// End-to-end check of ClientRMService.getClusterNodes filtering by NodeState:
// a RUNNING filter hides a LOST node, an unhealthy node drops out of the
// default view but appears under UNHEALTHY, and allOf(NodeState) also
// includes a freshly registered node that never heartbeated.
@Test public void testGetClusterNodes() throws Exception {
MockRM rm=new MockRM(){
protected ClientRMService createClientRMService(){
return new ClientRMService(this.rmContext,scheduler,this.rmAppManager,this.applicationACLsManager,this.queueACLsManager,this.getRMContext().getRMDelegationTokenSecretManager());
}
}
;
rm.start();
// One healthy node, and one that is driven to RUNNING then marked LOST.
MockNM node=rm.registerNode("host1:1234",1024);
rm.sendNodeStarted(node);
node.nodeHeartbeat(true);
MockNM lostNode=rm.registerNode("host2:1235",1024);
rm.sendNodeStarted(lostNode);
lostNode.nodeHeartbeat(true);
rm.NMwaitForState(lostNode.getNodeId(),NodeState.RUNNING);
rm.sendNodeLost(lostNode);
Configuration conf=new Configuration();
YarnRPC rpc=YarnRPC.create(conf);
InetSocketAddress rmAddress=rm.getClientRMService().getBindAddress();
LOG.info("Connecting to ResourceManager at " + rmAddress);
ApplicationClientProtocol client=(ApplicationClientProtocol)rpc.getProxy(ApplicationClientProtocol.class,rmAddress,conf);
// RUNNING filter: only the healthy node is reported.
GetClusterNodesRequest request=GetClusterNodesRequest.newInstance(EnumSet.of(NodeState.RUNNING));
List nodeReports=client.getClusterNodes(request).getNodeReports();
Assert.assertEquals(1,nodeReports.size());
Assert.assertNotSame("Node is expected to be healthy!",NodeState.UNHEALTHY,nodeReports.get(0).getNodeState());
// Turn the node unhealthy: it must vanish from the RUNNING view...
node.nodeHeartbeat(false);
nodeReports=client.getClusterNodes(request).getNodeReports();
Assert.assertEquals("Unhealthy nodes should not show up by default",0,nodeReports.size());
// ...and reappear under the UNHEALTHY filter.
request=GetClusterNodesRequest.newInstance(EnumSet.of(NodeState.UNHEALTHY));
nodeReports=client.getClusterNodes(request).getNodeReports();
Assert.assertEquals(1,nodeReports.size());
Assert.assertEquals("Node is expected to be unhealthy!",NodeState.UNHEALTHY,nodeReports.get(0).getNodeState());
// With allOf(NodeState), a brand-new node plus the other two are visible.
rm.registerNode("host3:1236",1024);
request=GetClusterNodesRequest.newInstance(EnumSet.allOf(NodeState.class));
nodeReports=client.getClusterNodes(request).getNodeReports();
Assert.assertEquals(3,nodeReports.size());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Lists the containers of a known application attempt and checks the first
 * container carries the expected id; an ApplicationNotFoundException fails
 * the test.
 */
@Test public void testGetContainers() throws YarnException, IOException {
  ClientRMService clientService=createRMService();
  RecordFactory factory=RecordFactoryProvider.getRecordFactory(null);
  GetContainersRequest containersRequest=factory.newRecordInstance(GetContainersRequest.class);
  ApplicationAttemptId attempt=ApplicationAttemptId.newInstance(ApplicationId.newInstance(123456,1),1);
  ContainerId expectedContainer=ContainerId.newInstance(attempt,1);
  containersRequest.setApplicationAttemptId(attempt);
  try {
    GetContainersResponse containersResponse=clientService.getContainers(containersRequest);
    Assert.assertEquals(expectedContainer,containersResponse.getContainerList().get(0).getContainerId());
  }
  catch ( ApplicationNotFoundException ex) {
    Assert.fail(ex.getMessage());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// FifoScheduler blacklisting: an app that blacklists a single host only gets
// containers on other nodes, and blacklisting a rack name ("rack0") is also
// honored — allocations only land on nodes outside the blacklist. The exact
// NodeUpdate order below drives which node the scheduler considers next.
@Test(timeout=50000) public void testBlackListNodes() throws Exception {
Configuration conf=new Configuration();
conf.setClass(YarnConfiguration.RM_SCHEDULER,FifoScheduler.class,ResourceScheduler.class);
MockRM rm=new MockRM(conf);
rm.start();
FifoScheduler fs=(FifoScheduler)rm.getResourceScheduler();
// Four 4GB nodes: two on rack0 (n1,n2) and two on rack1 (n3,n4).
int rack_num_0=0;
int rack_num_1=1;
String host_0_0="127.0.0.1";
RMNode n1=MockNodes.newNodeInfo(rack_num_0,MockNodes.newResource(4 * GB),1,host_0_0);
fs.handle(new NodeAddedSchedulerEvent(n1));
String host_0_1="127.0.0.2";
RMNode n2=MockNodes.newNodeInfo(rack_num_0,MockNodes.newResource(4 * GB),1,host_0_1);
fs.handle(new NodeAddedSchedulerEvent(n2));
String host_1_0="127.0.0.3";
RMNode n3=MockNodes.newNodeInfo(rack_num_1,MockNodes.newResource(4 * GB),1,host_1_0);
fs.handle(new NodeAddedSchedulerEvent(n3));
String host_1_1="127.0.0.4";
RMNode n4=MockNodes.newNodeInfo(rack_num_1,MockNodes.newResource(4 * GB),1,host_1_1);
fs.handle(new NodeAddedSchedulerEvent(n4));
// Register one application attempt directly with the scheduler.
ApplicationId appId1=BuilderUtils.newApplicationId(100,1);
ApplicationAttemptId appAttemptId1=BuilderUtils.newApplicationAttemptId(appId1,1);
SchedulerEvent appEvent=new AppAddedSchedulerEvent(appId1,"queue","user");
fs.handle(appEvent);
SchedulerEvent attemptEvent=new AppAttemptAddedSchedulerEvent(appAttemptId1,false);
fs.handle(attemptEvent);
List emptyId=new ArrayList();
List emptyAsk=new ArrayList();
// Ask for 1 container anywhere while blacklisting host n3 (127.0.0.3).
List ask1=new ArrayList();
ask1.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),"rack1",BuilderUtils.newResource(GB,1),1));
ask1.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),ResourceRequest.ANY,BuilderUtils.newResource(GB,1),1));
fs.allocate(appAttemptId1,ask1,emptyId,Collections.singletonList(host_1_0),null);
// Updating the blacklisted node must not produce a container...
fs.handle(new NodeUpdateSchedulerEvent(n3));
Allocation allocation1=fs.allocate(appAttemptId1,emptyAsk,emptyId,null,null);
Assert.assertEquals("allocation1",0,allocation1.getContainers().size());
// ...but updating n4 (same rack, not blacklisted) must.
fs.handle(new NodeUpdateSchedulerEvent(n4));
Allocation allocation2=fs.allocate(appAttemptId1,emptyAsk,emptyId,null,null);
Assert.assertEquals("allocation2",1,allocation2.getContainers().size());
List containerList=allocation2.getContainers();
for ( Container container : containerList) {
Assert.assertEquals("Container is allocated on n4",container.getNodeId(),n4.getNodeID());
}
// Second ask: blacklist the whole "rack0" rack. Updates on n1/n2 (rack0)
// and on blacklisted-by-the-first-ask n3 yield nothing; only n4 allocates.
List ask2=new ArrayList();
ask2.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),ResourceRequest.ANY,BuilderUtils.newResource(GB,1),1));
fs.allocate(appAttemptId1,ask2,emptyId,Collections.singletonList("rack0"),null);
fs.handle(new NodeUpdateSchedulerEvent(n1));
Allocation allocation3=fs.allocate(appAttemptId1,emptyAsk,emptyId,null,null);
Assert.assertEquals("allocation3",0,allocation3.getContainers().size());
fs.handle(new NodeUpdateSchedulerEvent(n2));
Allocation allocation4=fs.allocate(appAttemptId1,emptyAsk,emptyId,null,null);
Assert.assertEquals("allocation4",0,allocation4.getContainers().size());
fs.handle(new NodeUpdateSchedulerEvent(n3));
Allocation allocation5=fs.allocate(appAttemptId1,emptyAsk,emptyId,null,null);
Assert.assertEquals("allocation5",0,allocation5.getContainers().size());
fs.handle(new NodeUpdateSchedulerEvent(n4));
Allocation allocation6=fs.allocate(appAttemptId1,emptyAsk,emptyId,null,null);
Assert.assertEquals("allocation6",1,allocation6.getContainers().size());
containerList=allocation6.getContainers();
for ( Container container : containerList) {
Assert.assertEquals("Container is allocated on n4",container.getNodeId(),n4.getNodeID());
}
rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * End-to-end scheduling sanity test: starts an RM with two NMs, runs two
 * AMs, allocates one extra container for each application, and verifies
 * that the per-node used/available memory reports match the allocations,
 * including after one container completes.
 */
@Test
public void test() throws Exception {
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  MockRM rm = new MockRM(conf);
  rm.start();
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
  MockNM nm2 = rm.registerNode("127.0.0.2:5678", 4 * GB);

  // Submit app1; the heartbeat from nm1 lets the scheduler place its
  // 2 GB AM container on nm1.
  RMApp app1 = rm.submitApp(2048);
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();
  SchedulerNodeReport report_nm1 =
      rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());

  // Submit app2; its 2 GB AM container goes to nm2.
  RMApp app2 = rm.submitApp(2048);
  nm2.nodeHeartbeat(true);
  RMAppAttempt attempt2 = app2.getCurrentAppAttempt();
  MockAM am2 = rm.sendAMLaunched(attempt2.getAppAttemptId());
  am2.registerAppAttempt();
  SchedulerNodeReport report_nm2 =
      rm.getResourceScheduler().getNodeReport(nm2.getNodeId());
  Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemory());

  // am1 asks for one 1 GB container, am2 for one 3 GB container; both
  // name the two node addresses as candidate locations.
  am1.addRequests(new String[] {"127.0.0.1", "127.0.0.2"}, GB, 1, 1);
  AllocateResponse alloc1Response = am1.schedule(); // send the request
  am2.addRequests(new String[] {"127.0.0.1", "127.0.0.2"}, 3 * GB, 0, 1);
  AllocateResponse alloc2Response = am2.schedule(); // send the request

  // Trigger scheduling via nm1's heartbeat and poll until both
  // containers show up.
  // NOTE(review): these loops have no upper bound, so a scheduler bug
  // hangs the test instead of failing it — confirm this is intended.
  nm1.nodeHeartbeat(true);
  while (alloc1Response.getAllocatedContainers().size() < 1) {
    LOG.info("Waiting for containers to be created for app 1...");
    Thread.sleep(1000);
    alloc1Response = am1.schedule();
  }
  while (alloc2Response.getAllocatedContainers().size() < 1) {
    LOG.info("Waiting for containers to be created for app 2...");
    Thread.sleep(1000);
    alloc2Response = am2.schedule();
  }
  nm2.nodeHeartbeat(true);

  // Both new containers landed on nm1 — the only node that heartbeated
  // while the requests were pending.
  List<Container> allocated1 = alloc1Response.getAllocatedContainers();
  Assert.assertEquals(1, allocated1.size());
  Assert.assertEquals(1 * GB, allocated1.get(0).getResource().getMemory());
  Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());
  List<Container> allocated2 = alloc2Response.getAllocatedContainers();
  Assert.assertEquals(1, allocated2.size());
  Assert.assertEquals(3 * GB, allocated2.get(0).getResource().getMemory());
  Assert.assertEquals(nm1.getNodeId(), allocated2.get(0).getNodeId());

  // nm1 is now full (2 GB AM + 1 GB + 3 GB = 6 GB); nm2 holds only
  // app2's AM (2 GB used, 2 GB free).
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  report_nm2 = rm.getResourceScheduler().getNodeReport(nm2.getNodeId());
  Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory());
  Assert.assertEquals(2 * GB, report_nm2.getAvailableResource().getMemory());
  Assert.assertEquals(6 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemory());

  // Complete app1's 1 GB container and wait (bounded, ~20s) for the RM
  // to observe the completion.
  Container c1 = allocated1.get(0);
  Assert.assertEquals(GB, c1.getResource().getMemory());
  ContainerStatus containerStatus =
      BuilderUtils.newContainerStatus(c1.getId(), ContainerState.COMPLETE, "", 0);
  nm1.containerStatus(containerStatus);
  int waitCount = 0;
  while (attempt1.getJustFinishedContainers().size() < 1 && waitCount++ != 20) {
    LOG.info("Waiting for containers to be finished for app 1... Tried "
        + waitCount + " times already..");
    Thread.sleep(1000);
  }
  Assert.assertEquals(1, attempt1.getJustFinishedContainers().size());
  Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses().size());
  // nm1's usage drops by the completed container's 1 GB.
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(5 * GB, report_nm1.getUsedResource().getMemory());
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies FifoScheduler headroom reporting: on a single 4 GB node,
 * after a 1 GB request from app1 and a 2 GB request from app2 are both
 * satisfied, each application must see the remaining 1 GB as headroom.
 */
@Test(timeout = 50000)
public void testHeadroom() throws Exception {
  Configuration conf = new Configuration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
      ResourceScheduler.class);
  MockRM rm = new MockRM(conf);
  rm.start();
  FifoScheduler fs = (FifoScheduler) rm.getResourceScheduler();

  // Single 4 GB node.
  RMNode n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1, "127.0.0.2");
  fs.handle(new NodeAddedSchedulerEvent(n1));

  // Register two application attempts directly with the scheduler.
  ApplicationId appId1 = BuilderUtils.newApplicationId(100, 1);
  ApplicationAttemptId appAttemptId1 =
      BuilderUtils.newApplicationAttemptId(appId1, 1);
  SchedulerEvent appEvent = new AppAddedSchedulerEvent(appId1, "queue", "user");
  fs.handle(appEvent);
  SchedulerEvent attemptEvent =
      new AppAttemptAddedSchedulerEvent(appAttemptId1, false);
  fs.handle(attemptEvent);
  ApplicationId appId2 = BuilderUtils.newApplicationId(200, 2);
  ApplicationAttemptId appAttemptId2 =
      BuilderUtils.newApplicationAttemptId(appId2, 1);
  SchedulerEvent appEvent2 = new AppAddedSchedulerEvent(appId2, "queue", "user");
  fs.handle(appEvent2);
  SchedulerEvent attemptEvent2 =
      new AppAttemptAddedSchedulerEvent(appAttemptId2, false);
  fs.handle(attemptEvent2);

  List<ContainerId> emptyId = new ArrayList<ContainerId>();
  List<ResourceRequest> emptyAsk = new ArrayList<ResourceRequest>();

  // App1 asks for one 1 GB container.
  List<ResourceRequest> ask1 = new ArrayList<ResourceRequest>();
  ask1.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),
      ResourceRequest.ANY, BuilderUtils.newResource(GB, 1), 1));
  fs.allocate(appAttemptId1, ask1, emptyId, null, null);

  // App2 asks for one 2 GB container.
  List<ResourceRequest> ask2 = new ArrayList<ResourceRequest>();
  ask2.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),
      ResourceRequest.ANY, BuilderUtils.newResource(2 * GB, 1), 1));
  fs.allocate(appAttemptId2, ask2, emptyId, null, null);

  // A node update lets the scheduler satisfy both requests, leaving
  // 4 GB - 1 GB - 2 GB = 1 GB of headroom for each application.
  fs.handle(new NodeUpdateSchedulerEvent(n1));
  Allocation allocation1 = fs.allocate(appAttemptId1, emptyAsk, emptyId, null, null);
  Assert.assertEquals("Allocation headroom", 1 * GB,
      allocation1.getResourceLimit().getMemory());
  Allocation allocation2 = fs.allocate(appAttemptId2, emptyAsk, emptyId, null, null);
  Assert.assertEquals("Allocation headroom", 1 * GB,
      allocation2.getResourceLimit().getMemory());
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Validate killing an application when it is at accepted state.
 * <p>
 * The dispatcher's event handler is replaced by a Mockito spy that drops
 * attempt-level KILL events, so the attempt keeps running while the
 * application sits in KILLING; the test then injects ATTEMPT_KILLED by
 * hand and checks the app reaches KILLED and queue metrics are updated.
 * @throws Exception exception
 */
@Test(timeout=60000) public void testApplicationKillAtAcceptedState() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
// Dispatcher whose handler swallows RMAppAttemptEventType.KILL,
// keeping the attempt alive after the app-level kill is requested.
final Dispatcher dispatcher=new AsyncDispatcher(){
@Override public EventHandler getEventHandler(){
// Matches only attempt KILL events.
// NOTE(review): ArgumentMatcher is used raw here — presumably a type
// parameter was elided; confirm against the Mockito version in use.
class EventArgMatcher extends ArgumentMatcher {
@Override public boolean matches( Object argument){
if (argument instanceof RMAppAttemptEvent) {
if (((RMAppAttemptEvent)argument).getType().equals(RMAppAttemptEventType.KILL)) {
return true;
}
}
return false;
}
}
// Spy the real handler and no-op only the matched KILL events.
EventHandler handler=spy(super.getEventHandler());
doNothing().when(handler).handle(argThat(new EventArgMatcher()));
return handler;
}
}
;
MockRM rm=new MockRM(conf){
@Override protected Dispatcher createDispatcher(){
return dispatcher;
}
}
;
// Snapshot metrics so the assertions below are relative deltas.
QueueMetrics metrics=rm.getResourceScheduler().getRootQueueMetrics();
int appsKilled=metrics.getAppsKilled();
int appsSubmitted=metrics.getAppsSubmitted();
rm.start();
MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm.getResourceTrackerService());
nm1.registerNode();
// Launch an app and report its AM container RUNNING while the app is
// still ACCEPTED (the AM has not registered yet).
RMApp application=rm.submitApp(200);
MockAM am=MockRM.launchAM(application,rm,nm1);
am.waitForState(RMAppAttemptState.LAUNCHED);
nm1.nodeHeartbeat(am.getApplicationAttemptId(),1,ContainerState.RUNNING);
rm.waitForState(application.getApplicationId(),RMAppState.ACCEPTED);
// Kill the app; the dropped attempt KILL event leaves the app in
// KILLING, and the attempt can still register and reach RUNNING.
KillApplicationRequest request=KillApplicationRequest.newInstance(application.getApplicationId());
rm.getClientRMService().forceKillApplication(request);
am.registerAppAttempt(false);
rm.waitForState(application.getApplicationId(),RMAppState.KILLING);
rm.waitForState(am.getApplicationAttemptId(),RMAppAttemptState.RUNNING);
// Deliver ATTEMPT_KILLED manually to let the kill complete.
rm.getRMContext().getDispatcher().getEventHandler().handle(new RMAppEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_KILLED));
rm.waitForState(application.getApplicationId(),RMAppState.KILLED);
// Both counters must have moved by exactly one.
metrics=rm.getResourceScheduler().getRootQueueMetrics();
Assert.assertEquals(appsKilled + 1,metrics.getAppsKilled());
Assert.assertEquals(appsSubmitted + 1,metrics.getAppsSubmitted());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that when an AM fails and the application goes back to
 * ACCEPTED for a new attempt, the application report no longer exposes
 * the dead AM's host and RPC port ("N/A" and -1 instead).
 */
@Test(timeout = 60000)
public void testInvalidatedAMHostPortOnAMRestart() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  MockRM rm1 = new MockRM(conf);
  rm1.start();
  MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
  nm1.registerNode();

  // Launch an app, then fail its AM by completing container 1 (the AM
  // container) on the NM heartbeat; the app returns to ACCEPTED for a
  // fresh attempt.
  RMApp app2 = rm1.submitApp(200);
  MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);
  nm1.nodeHeartbeat(am2.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
  am2.waitForState(RMAppAttemptState.FAILED);
  rm1.waitForState(app2.getApplicationId(), RMAppState.ACCEPTED);

  // The report for the restarting app must not leak the failed AM's
  // address.
  GetApplicationReportRequest request1 =
      GetApplicationReportRequest.newInstance(app2.getApplicationId());
  ApplicationReport report1 =
      rm1.getClientRMService().getApplicationReport(request1).getApplicationReport();
  Assert.assertEquals("N/A", report1.getHost());
  Assert.assertEquals(-1, report1.getRpcPort());
  // Shut the RM down so its daemon threads do not leak into later tests
  // (the sibling tests in this file stop their RMs as well).
  rm1.stop();
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that applications whose AM failed or that were killed report
 * "N/A"/-1 for the AM host/port in the completed-applications listing,
 * while a successfully finished application keeps its real AM address.
 */
@Test(timeout = 80000)
public void testInvalidateAMHostPortWhenAMFailedOrKilled() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
  MockRM rm1 = new MockRM(conf);
  rm1.start();
  RMApp app1 = rm1.submitApp(200);
  MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
  nm1.registerNode();

  // App1 finishes normally.
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
  MockRM.finishAMAndVerifyAppState(app1, rm1, nm1, am1);

  // App2's AM container completes unexpectedly; with max-attempts=1 the
  // whole application fails.
  RMApp app2 = rm1.submitApp(200);
  MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);
  nm1.nodeHeartbeat(am2.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
  am2.waitForState(RMAppAttemptState.FAILED);
  rm1.waitForState(app2.getApplicationId(), RMAppState.FAILED);

  // App3 is killed by the client.
  RMApp app3 = rm1.submitApp(200);
  MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm1);
  rm1.killApp(app3.getApplicationId());
  rm1.waitForState(app3.getApplicationId(), RMAppState.KILLED);
  rm1.waitForState(am3.getApplicationAttemptId(), RMAppAttemptState.KILLED);

  // Fetch every completed application and check the reported AM address.
  GetApplicationsRequest request1 = GetApplicationsRequest.newInstance(
      EnumSet.of(YarnApplicationState.FINISHED, YarnApplicationState.KILLED,
          YarnApplicationState.FAILED));
  GetApplicationsResponse response1 = rm1.getClientRMService().getApplications(request1);
  List<ApplicationReport> appList1 = response1.getApplicationList();
  Assert.assertEquals(3, appList1.size());
  for (ApplicationReport report : appList1) {
    // The failed and killed apps must not expose their AM's host/port.
    if (report.getApplicationId().equals(app2.getApplicationId())
        || report.getApplicationId().equals(app3.getApplicationId())) {
      Assert.assertEquals("N/A", report.getHost());
      Assert.assertEquals(-1, report.getRpcPort());
    }
    // The successfully finished app keeps its registered AM address.
    if (report.getApplicationId().equals(app1.getApplicationId())) {
      Assert.assertFalse(report.getHost().equals("N/A"));
      Assert.assertTrue(report.getRpcPort() != -1);
    }
  }
  // Shut the RM down so its daemon threads do not leak into later tests.
  rm1.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that an NM token is issued for a normal (non-AM) container:
 * the first container id is burned so the master container is not id 1,
 * no NM token exists for the attempt at AM launch time, and the token
 * returned alongside the allocated container names the right node.
 */
@Test(timeout = 20000)
public void testNMTokenSentForNormalContainer() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getCanonicalName());
  MockRM rm = new MockRM(conf);
  rm.start();
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  RMApp app = rm.submitApp(2000);
  RMAppAttempt attempt = app.getCurrentAppAttempt();

  // Burn container id 1 so the AM container receives an id != 1; that
  // makes the "normal container" path below distinguishable.
  CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
  cs.getApplicationAttempt(attempt.getAppAttemptId()).getNewContainerId();
  nm1.nodeHeartbeat(true);
  MockAM am = MockRM.launchAM(app, rm, nm1);
  Assert.assertTrue(attempt.getMasterContainer().getId().getId() != 1);
  // No NM token should have been issued for launching the AM container.
  Assert.assertFalse(rm.getRMContext().getNMTokenSecretManager()
      .isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(), nm1.getNodeId()));
  am.registerAppAttempt();
  rm.waitForState(app.getApplicationId(), RMAppState.RUNNING);

  // Ask for one 2000 MB container and poll until it is allocated; the
  // @Test timeout bounds this loop.
  int NUM_CONTAINERS = 1;
  List<Container> containers = new ArrayList<Container>();
  List<NMToken> expectedNMTokens = new ArrayList<NMToken>();
  while (true) {
    AllocateResponse response =
        am.allocate("127.0.0.1", 2000, NUM_CONTAINERS, new ArrayList<ContainerId>());
    nm1.nodeHeartbeat(true);
    containers.addAll(response.getAllocatedContainers());
    expectedNMTokens.addAll(response.getNMTokens());
    if (containers.size() == NUM_CONTAINERS) {
      break;
    }
    Thread.sleep(200);
    System.out.println("Waiting for container to be allocated.");
  }
  // The NM token accompanying the normal container must be for nm1.
  NodeId nodeId = expectedNMTokens.get(0).getNodeId();
  Assert.assertEquals(nm1.getNodeId(), nodeId);
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Verifies that a ResourceManager started with the
 * FileSystemBasedConfigurationProvider picks up its initial settings
 * (exclude hosts, admin ACLs, capacity-scheduler limits, service ACLs,
 * proxy users, and group mappings) from configuration files uploaded to
 * the remote file system rather than from the local classpath.
 */
@Test
public void testRMInitialsWithFileSystemBasedConfigurationProvider() throws Exception {
  configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
      "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");

  // Create a fresh exclude-hosts file with one node address and upload
  // it to the remote file system.
  final File excludeHostsFile = new File(tmpDir.toString(), "excludeHosts");
  if (excludeHostsFile.exists()) {
    excludeHostsFile.delete();
  }
  if (!excludeHostsFile.createNewFile()) {
    Assert.fail("Can not create " + "excludeHosts");
  }
  PrintWriter fileWriter = new PrintWriter(excludeHostsFile);
  fileWriter.write("0.0.0.0:123");
  fileWriter.close();
  uploadToRemoteFileSystem(new Path(excludeHostsFile.getAbsolutePath()));

  // yarn-site.xml: admin ACL and the remote exclude-hosts path.
  YarnConfiguration yarnConf = new YarnConfiguration();
  yarnConf.set(YarnConfiguration.YARN_ADMIN_ACL, "world:anyone:rwcda");
  yarnConf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,
      this.workingPath + "/excludeHosts");
  uploadConfiguration(yarnConf, "yarn-site.xml");

  // capacity-scheduler.xml: bump the system-wide application limit.
  CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
  csConf.set("yarn.scheduler.capacity.maximum-applications", "5000");
  uploadConfiguration(csConf, "capacity-scheduler.xml");

  // hadoop-policy.xml: service-level ACL for the client protocol.
  String aclsString = "alice,bob users,wheel";
  Configuration newConf = new Configuration();
  newConf.set("security.applicationclient.protocol.acl", aclsString);
  uploadConfiguration(newConf, "hadoop-policy.xml");

  // core-site.xml: enable authorization, define a proxy user, and plug
  // in the mock group-mapping service.
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);
  conf.set("hadoop.proxyuser.test.groups", "test_groups");
  conf.set("hadoop.proxyuser.test.hosts", "test_hosts");
  conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      MockUnixGroupsMapping.class, GroupMappingServiceProvider.class);
  uploadConfiguration(conf, "core-site.xml");

  // Switch the mock mapping to its updated group set (D/E/F) before the
  // RM starts, so initialization must read the uploaded files.
  MockUnixGroupsMapping.updateGroups();

  ResourceManager resourceManager = null;
  try {
    try {
      resourceManager = new ResourceManager();
      resourceManager.init(configuration);
      resourceManager.start();
    } catch (Exception ex) {
      fail("Should not get any exceptions");
    }

    // Exclude hosts were read from the uploaded file.
    Set<String> excludeHosts = resourceManager.getRMContext().getNodesListManager()
        .getHostsReader().getExcludedHosts();
    Assert.assertEquals(1, excludeHosts.size());
    Assert.assertTrue(excludeHosts.contains("0.0.0.0:123"));

    // Admin ACL came from the uploaded yarn-site.xml.
    String aclStringAfter =
        resourceManager.adminService.getAccessControlList().getAclString().trim();
    Assert.assertEquals("world:anyone:rwcda", aclStringAfter);

    // Scheduler limit came from the uploaded capacity-scheduler.xml.
    CapacityScheduler cs =
        (CapacityScheduler) resourceManager.getRMContext().getScheduler();
    int maxAppsAfter = cs.getConfiguration().getMaximumSystemApplications();
    Assert.assertEquals(5000, maxAppsAfter);

    // Every RPC server picked up the uploaded service ACLs.
    ServiceAuthorizationManager adminServiceServiceManager =
        resourceManager.adminService.getServer().getServiceAuthorizationManager();
    verifyServiceACLsRefresh(adminServiceServiceManager,
        org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class, aclsString);
    ServiceAuthorizationManager clientRMServiceServiceManager =
        resourceManager.getRMContext().getClientRMService().getServer()
            .getServiceAuthorizationManager();
    verifyServiceACLsRefresh(clientRMServiceServiceManager,
        org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class, aclsString);
    ServiceAuthorizationManager appMasterService =
        resourceManager.getRMContext().getApplicationMasterService().getServer()
            .getServiceAuthorizationManager();
    verifyServiceACLsRefresh(appMasterService,
        org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class, aclsString);
    ServiceAuthorizationManager RTService =
        resourceManager.getRMContext().getResourceTrackerService().getServer()
            .getServiceAuthorizationManager();
    verifyServiceACLsRefresh(RTService,
        org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class, aclsString);

    // Proxy-user settings came from the uploaded core-site.xml.
    Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyGroups()
        .get("hadoop.proxyuser.test.groups").size() == 1);
    Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyGroups()
        .get("hadoop.proxyuser.test.groups").contains("test_groups"));
    Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyHosts()
        .get("hadoop.proxyuser.test.hosts").size() == 1);
    Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyHosts()
        .get("hadoop.proxyuser.test.hosts").contains("test_hosts"));

    // Group mapping reflects the mock's post-update groups.
    List<String> groupAfter = Groups.getUserToGroupsMappingService(configuration)
        .getGroups(UserGroupInformation.getCurrentUser().getUserName());
    Assert.assertTrue(groupAfter.contains("test_group_D")
        && groupAfter.contains("test_group_E")
        && groupAfter.contains("test_group_F") && groupAfter.size() == 3);
  } finally {
    if (resourceManager != null) {
      resourceManager.stop();
    }
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Verifies refreshUserToGroupsMappings backed by the
 * FileSystemBasedConfigurationProvider: after MockUnixGroupsMapping
 * switches its group set and the refresh request is issued, group
 * lookups must return the new groups.
 */
@Test
public void testRefreshUserToGroupsMappingsWithFileSystemBasedConfigurationProvider()
    throws IOException, YarnException {
  configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
      "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");

  // Snapshot the groups of a synthetic test user before the mock
  // mapping service takes over.
  String[] defaultTestUserGroups = {"dummy_group1", "dummy_group2"};
  UserGroupInformation ugi =
      UserGroupInformation.createUserForTesting("dummyUser", defaultTestUserGroups);
  String user = ugi.getUserName();
  List<String> groupWithInit = new ArrayList<String>(2);
  for (int i = 0; i < ugi.getGroupNames().length; i++) {
    groupWithInit.add(ugi.getGroupNames()[i]);
  }

  // Upload a core-site.xml that installs the mock group mapping.
  uploadDefaultConfiguration();
  Configuration conf = new Configuration();
  conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      MockUnixGroupsMapping.class, GroupMappingServiceProvider.class);
  uploadConfiguration(conf, "core-site.xml");

  try {
    rm = new MockRM(configuration);
    rm.init(configuration);
    rm.start();
  } catch (Exception ex) {
    fail("Should not get any exceptions");
  }

  // Before the refresh: the mock's initial group set (A/B/C) is served,
  // and it differs from the user's original groups.
  List<String> groupBefore = new ArrayList<String>(
      Groups.getUserToGroupsMappingService(configuration).getGroups(user));
  Assert.assertTrue(groupBefore.contains("test_group_A")
      && groupBefore.contains("test_group_B")
      && groupBefore.contains("test_group_C") && groupBefore.size() == 3);
  Assert.assertTrue(groupWithInit.size() != groupBefore.size());
  Assert.assertFalse(groupWithInit.contains("test_group_A")
      || groupWithInit.contains("test_group_B")
      || groupWithInit.contains("test_group_C"));

  // Switch the mock to groups D/E/F and ask the RM to refresh.
  MockUnixGroupsMapping.updateGroups();
  rm.adminService.refreshUserToGroupsMappings(
      RefreshUserToGroupsMappingsRequest.newInstance());

  // After the refresh the new group set is visible.
  List<String> groupAfter =
      Groups.getUserToGroupsMappingService(configuration).getGroups(user);
  Assert.assertTrue(groupAfter.contains("test_group_D")
      && groupAfter.contains("test_group_E")
      && groupAfter.contains("test_group_F") && groupAfter.size() == 3);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A node that reconnects reporting a different NodeManager version must
 * have the version on the existing RMNode updated in place.
 */
@Test
public void testReconnnectUpdate() {
  final String oldVersion = "nm version 1";
  final String newVersion = "nm version 2";

  // Node starts out running with the old NM version.
  RMNodeImpl existingNode = getRunningNode(oldVersion);
  Assert.assertEquals(oldVersion, existingNode.getNodeManagerVersion());

  // Simulate the same node reconnecting after an NM upgrade.
  RMNodeImpl upgradedNode = getRunningNode(newVersion);
  existingNode.handle(
      new RMNodeReconnectEvent(existingNode.getNodeID(), upgradedNode, null));

  // The original RMNode now reflects the reconnected NM's version.
  Assert.assertEquals(newVersion, existingNode.getNodeManagerVersion());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies RM restart while an application is still running but its AM
 * has failed: the FAILED attempt is persisted in the state store with no
 * final application state yet (the app is still retrying), and a
 * restarted RM recovers the attempt in FAILED state.
 */
@Test(timeout = 60000)
public void testRMRestartAppRunningAMFailed() throws Exception {
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
      YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);
  RMState rmState = memStore.getState();
  // Live view of the persisted application state.
  Map<ApplicationId, ApplicationState> rmAppState = rmState.getApplicationState();
  MockRM rm1 = new MockRM(conf, memStore);
  rm1.start();
  MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
  nm1.registerNode();

  // Launch an app, then fail its AM by completing the AM container.
  RMApp app0 = rm1.submitApp(200);
  MockAM am0 = launchAM(app0, rm1, nm1);
  nm1.nodeHeartbeat(am0.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
  am0.waitForState(RMAppAttemptState.FAILED);

  // The store holds the FAILED attempt, but no final app state yet:
  // the application is still alive and will be given another attempt.
  ApplicationState appState = rmAppState.get(app0.getApplicationId());
  Assert.assertEquals(RMAppAttemptState.FAILED,
      appState.getAttempt(am0.getApplicationAttemptId()).getState());
  Assert.assertNull(rmAppState.get(app0.getApplicationId()).getState());
  rm1.waitForState(app0.getApplicationId(), RMAppState.ACCEPTED);

  // A second RM restarted from the same store recovers the attempt as
  // FAILED.
  MockRM rm2 = new MockRM(conf, memStore);
  rm2.start();
  rm2.waitForState(am0.getApplicationAttemptId(), RMAppAttemptState.FAILED);
  rm1.stop();
  rm2.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Full restart/recovery scenario: rm1 runs a finished app, a running
// app with a worker container, a not-yet-scheduled app, and an
// unmanaged-AM app; rm2 then starts from the same MemoryRMStateStore
// and must recover all four, resync the NMs, relaunch the live AMs,
// and drive both remaining apps to completion.
@SuppressWarnings("rawtypes") @Test(timeout=180000) public void testRMRestart() throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
// In-memory store shared by both RM instances; rmAppState is a live
// view of what has been persisted.
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
RMState rmState=memStore.getState();
Map rmAppState=rmState.getApplicationState();
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
MockNM nm2=new MockNM("127.0.0.2:5678",15120,rm1.getResourceTrackerService());
nm1.registerNode();
nm2.registerNode();
// App 0: run to successful completion before the restart.
RMApp app0=rm1.submitApp(200);
RMAppAttempt attempt0=app0.getCurrentAppAttempt();
Assert.assertEquals(1,rmAppState.size());
nm1.nodeHeartbeat(true);
MockAM am0=rm1.sendAMLaunched(attempt0.getAppAttemptId());
am0.registerAppAttempt();
finishApplicationMaster(app0,rm1,nm1,am0);
// App 1: persisted on submission, with no attempts recorded yet.
RMApp app1=rm1.submitApp(200);
ApplicationState appState=rmAppState.get(app1.getApplicationId());
Assert.assertNotNull(appState);
Assert.assertEquals(0,appState.getAttemptCount());
Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),app1.getApplicationSubmissionContext().getApplicationId());
// Drive app1's first attempt to ALLOCATED and check the persisted
// attempt records the master container (container id 1).
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1=app1.getCurrentAppAttempt();
ApplicationAttemptId attemptId1=attempt1.getAppAttemptId();
rm1.waitForState(attemptId1,RMAppAttemptState.ALLOCATED);
Assert.assertEquals(1,appState.getAttemptCount());
ApplicationAttemptState attemptState=appState.getAttempt(attemptId1);
Assert.assertNotNull(attemptState);
Assert.assertEquals(BuilderUtils.newContainerId(attemptId1,1),attemptState.getMasterContainer().getId());
// Launch app1's AM and poll until it receives one worker container.
MockAM am1=rm1.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
am1.allocate("127.0.0.1",1000,1,new ArrayList());
nm1.nodeHeartbeat(true);
List conts=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
while (conts.size() == 0) {
nm1.nodeHeartbeat(true);
conts.addAll(am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers());
Thread.sleep(500);
}
// App 2: submitted but never scheduled before the restart.
RMApp app2=rm1.submitApp(200);
appState=rmAppState.get(app2.getApplicationId());
Assert.assertNotNull(appState);
Assert.assertEquals(0,appState.getAttemptCount());
Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),app2.getApplicationSubmissionContext().getApplicationId());
// Unmanaged-AM app: its AM runs outside the RM; verify it is
// persisted with its single launched attempt.
RMApp appUnmanaged=rm1.submitApp(200,"someApp","someUser",null,true,null,conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS),null);
ApplicationAttemptId unmanagedAttemptId=appUnmanaged.getCurrentAppAttempt().getAppAttemptId();
ApplicationId unmanagedAppId=appUnmanaged.getApplicationId();
appState=rmAppState.get(unmanagedAppId);
Assert.assertNotNull(appState);
rm1.waitForState(unmanagedAttemptId,RMAppAttemptState.LAUNCHED);
rm1.waitForState(unmanagedAppId,RMAppState.ACCEPTED);
Assert.assertEquals(1,appState.getAttemptCount());
Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),appUnmanaged.getApplicationSubmissionContext().getApplicationId());
// Second RM starts from the same store (simulated restart); point the
// NMs at it.
MockRM rm2=new MockRM(conf,memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
nm2.setResourceTrackerService(rm2.getResourceTrackerService());
// All four apps are recovered; the finished one goes to FINISHED.
Assert.assertEquals(4,rm2.getRMContext().getRMApps().size());
rm2.waitForState(app0.getApplicationId(),RMAppState.FINISHED);
rm2.waitForState(am0.getApplicationAttemptId(),RMAppAttemptState.FINISHED);
RMApp loadedApp1=rm2.getRMContext().getRMApps().get(app1.getApplicationId());
Assert.assertNotNull(loadedApp1);
Assert.assertEquals(1,loadedApp1.getAppAttempts().size());
Assert.assertEquals(app1.getApplicationSubmissionContext().getApplicationId(),loadedApp1.getApplicationSubmissionContext().getApplicationId());
RMApp loadedApp2=rm2.getRMContext().getRMApps().get(app2.getApplicationId());
Assert.assertNotNull(loadedApp2);
Assert.assertEquals(app2.getApplicationSubmissionContext().getApplicationId(),loadedApp2.getApplicationSubmissionContext().getApplicationId());
rm2.waitForState(loadedApp1.getApplicationId(),RMAppState.ACCEPTED);
rm2.waitForState(loadedApp2.getApplicationId(),RMAppState.ACCEPTED);
Assert.assertEquals(1,loadedApp1.getAppAttempts().size());
Assert.assertEquals(1,loadedApp2.getAppAttempts().size());
// The old AM contacting the new RM is told to shut down, and NMs that
// registered with rm1 are told to resync.
am1.setAMRMProtocol(rm2.getApplicationMasterService(),rm2.getRMContext());
AllocateResponse allocResponse=am1.allocate(new ArrayList(),new ArrayList());
Assert.assertEquals(AMCommand.AM_SHUTDOWN,allocResponse.getAMCommand());
NodeHeartbeatResponse hbResponse=nm1.nodeHeartbeat(true);
Assert.assertEquals(NodeAction.RESYNC,hbResponse.getNodeAction());
hbResponse=nm2.nodeHeartbeat(true);
Assert.assertEquals(NodeAction.RESYNC,hbResponse.getNodeAction());
// Re-register fresh NMs with rm2; nm1 reports app1's old AM container
// as COMPLETE, which triggers a new attempt for loadedApp1.
nm1=new MockNM("127.0.0.1:1234",15120,rm2.getResourceTrackerService());
nm2=new MockNM("127.0.0.2:5678",15120,rm2.getResourceTrackerService());
NMContainerStatus status=TestRMRestart.createNMContainerStatus(loadedApp1.getCurrentAppAttempt().getAppAttemptId(),1,ContainerState.COMPLETE);
nm1.registerNode(Arrays.asList(status),null);
nm2.registerNode();
rm2.waitForState(loadedApp1.getApplicationId(),RMAppState.ACCEPTED);
// Bounded wait (40 x 200 ms = ~8 s) for the second attempt to appear.
int timeoutSecs=0;
while (loadedApp1.getAppAttempts().size() != 2 && timeoutSecs++ < 40) {
;
Thread.sleep(200);
}
// Freshly registered NMs must no longer be asked to resync.
hbResponse=nm1.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.RESYNC != hbResponse.getNodeAction());
hbResponse=nm2.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.RESYNC != hbResponse.getNodeAction());
// App1's new attempt reaches ALLOCATED and its master container is
// persisted; remember which NM hosts the AM for the finish step.
attempt1=loadedApp1.getCurrentAppAttempt();
attemptId1=attempt1.getAppAttemptId();
rm2.waitForState(attemptId1,RMAppAttemptState.ALLOCATED);
appState=rmAppState.get(loadedApp1.getApplicationId());
attemptState=appState.getAttempt(attemptId1);
Assert.assertNotNull(attemptState);
Assert.assertEquals(BuilderUtils.newContainerId(attemptId1,1),attemptState.getMasterContainer().getId());
MockNM am1Node=nm1;
if (attemptState.getMasterContainer().getNodeId().toString().contains("127.0.0.2")) {
am1Node=nm2;
}
// Same for app2's first (recovered) attempt.
RMAppAttempt attempt2=loadedApp2.getCurrentAppAttempt();
ApplicationAttemptId attemptId2=attempt2.getAppAttemptId();
rm2.waitForState(attemptId2,RMAppAttemptState.ALLOCATED);
appState=rmAppState.get(loadedApp2.getApplicationId());
attemptState=appState.getAttempt(attemptId2);
Assert.assertNotNull(attemptState);
Assert.assertEquals(BuilderUtils.newContainerId(attemptId2,1),attemptState.getMasterContainer().getId());
MockNM am2Node=nm1;
if (attemptState.getMasterContainer().getNodeId().toString().contains("127.0.0.2")) {
am2Node=nm2;
}
// Launch and register both recovered AMs, then poll until am1 gets
// containers flowing again.
am1=rm2.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
MockAM am2=rm2.sendAMLaunched(attempt2.getAppAttemptId());
am2.registerAppAttempt();
am1.allocate("127.0.0.1",1000,3,new ArrayList());
am2.allocate("127.0.0.2",1000,1,new ArrayList());
nm1.nodeHeartbeat(true);
nm2.nodeHeartbeat(true);
conts=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
while (conts.size() == 0) {
nm1.nodeHeartbeat(true);
nm2.nodeHeartbeat(true);
conts.addAll(am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers());
Thread.sleep(500);
}
// Finish both apps on whichever node hosts each AM, stop both RMs,
// and confirm all four applications remain persisted.
finishApplicationMaster(loadedApp1,rm2,am1Node,am1);
finishApplicationMaster(loadedApp2,rm2,am2Node,am2);
rm2.stop();
rm1.stop();
Assert.assertEquals(4,rmAppState.size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies RM restart of a failed application (max attempts = 1): the
 * final FAILED state and the failed attempt are persisted, and the
 * restarted RM recovers the app as FAILED with its diagnostics intact
 * and without creating new attempts.
 */
@Test(timeout = 60000)
public void testRMRestartFailedApp() throws Exception {
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);
  RMState rmState = memStore.getState();
  // Live view of the persisted application state.
  Map<ApplicationId, ApplicationState> rmAppState = rmState.getApplicationState();
  MockRM rm1 = new MockRM(conf, memStore);
  rm1.start();
  MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
  nm1.registerNode();

  // Fail the only attempt by completing the AM container; with
  // max-attempts=1 the whole application fails.
  RMApp app0 = rm1.submitApp(200);
  MockAM am0 = launchAM(app0, rm1, nm1);
  nm1.nodeHeartbeat(am0.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
  am0.waitForState(RMAppAttemptState.FAILED);
  rm1.waitForState(app0.getApplicationId(), RMAppState.FAILED);

  // Both the final app state and the attempt state are in the store.
  ApplicationState appState = rmAppState.get(app0.getApplicationId());
  Assert.assertEquals(RMAppState.FAILED, appState.getState());
  Assert.assertEquals(RMAppAttemptState.FAILED,
      appState.getAttempt(am0.getApplicationAttemptId()).getState());

  // Restart and verify recovery of the failed app (no new attempts).
  MockRM rm2 = new MockRM(conf, memStore);
  rm2.start();
  RMApp loadedApp0 = rm2.getRMContext().getRMApps().get(app0.getApplicationId());
  rm2.waitForState(app0.getApplicationId(), RMAppState.FAILED);
  rm2.waitForState(am0.getApplicationAttemptId(), RMAppAttemptState.FAILED);
  Assert.assertEquals(1, loadedApp0.getAppAttempts().size());
  verifyAppReportAfterRMRestart(app0, rm2);
  Assert.assertTrue(app0.getDiagnostics().toString().contains("Failing the application."));
  rm1.stop();
  rm2.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that stopping the RM drains the state-store dispatcher: the
 * store below blocks every store event until serviceStop() flips its
 * flag, so all app-save events are still queued when rm1.stop() is
 * called — yet after the stop every submitted app must be persisted.
 */
@Test(timeout = 60000)
public void testRMStateStoreDispatcherDrainedOnRMStop() throws Exception {
  MemoryRMStateStore memStore = new MemoryRMStateStore() {
    // Gate released by serviceStop(); volatile so the spin loop below
    // observes the change from the stopping thread.
    volatile boolean wait = true;

    @Override
    public void serviceStop() throws Exception {
      wait = false;
      super.serviceStop();
    }

    @Override
    protected void handleStoreEvent(RMStateStoreEvent event) {
      // Busy-spin until serviceStop() opens the gate, so no event is
      // processed while the RM is still running.
      while (wait) ;
      super.handleStoreEvent(event);
    }
  };
  memStore.init(conf);
  final MockRM rm1 = new MockRM(conf, memStore);
  rm1.start();

  // Submit apps; each reaches NEW_SAVING but its save event is stuck
  // behind the blocked store dispatcher.
  final ArrayList<RMApp> appList = new ArrayList<RMApp>();
  final int NUM_APPS = 5;
  for (int i = 0; i < NUM_APPS; i++) {
    RMApp app = rm1.submitApp(200, "name", "user",
        new HashMap<ApplicationAccessType, String>(), false, "default", -1, null,
        "MAPREDUCE", false);
    appList.add(app);
    rm1.waitForState(app.getApplicationId(), RMAppState.NEW_SAVING);
  }

  // Nothing has been persisted yet — the store is still blocked.
  Map<ApplicationId, ApplicationState> rmAppState =
      memStore.getState().getApplicationState();
  Assert.assertEquals(0, rmAppState.size());

  // stop() unblocks the store and must drain every queued save event.
  rm1.stop();
  for (RMApp app : appList) {
    ApplicationState appState = rmAppState.get(app.getApplicationId());
    Assert.assertNotNull(appState);
    Assert.assertEquals(0, appState.getAttemptCount());
    Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),
        app.getApplicationSubmissionContext().getApplicationId());
  }
  Assert.assertEquals(NUM_APPS, rmAppState.size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies RM restart of a successfully finished application: the
 * attempt's diagnostics, final status and tracking URL plus the app's
 * finish time are persisted, and a restarted RM reports them back to
 * clients unchanged.
 */
@Test(timeout = 60000)
public void testRMRestartSucceededApp() throws Exception {
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
      YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);
  RMState rmState = memStore.getState();
  // Live view of the persisted application state.
  Map<ApplicationId, ApplicationState> rmAppState = rmState.getApplicationState();
  MockRM rm1 = new MockRM(conf, memStore);
  rm1.start();
  MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
  nm1.registerNode();

  // Run an app to successful completion with explicit final status,
  // diagnostics, and tracking URL.
  RMApp app0 = rm1.submitApp(200);
  MockAM am0 = launchAM(app0, rm1, nm1);
  FinishApplicationMasterRequest req = FinishApplicationMasterRequest.newInstance(
      FinalApplicationStatus.SUCCEEDED, "diagnostics", "trackingUrl");
  finishApplicationMaster(app0, rm1, nm1, am0, req);

  // All completion info made it into the state store.
  ApplicationState appState = rmAppState.get(app0.getApplicationId());
  ApplicationAttemptState attemptState0 =
      appState.getAttempt(am0.getApplicationAttemptId());
  Assert.assertEquals("diagnostics", attemptState0.getDiagnostics());
  Assert.assertEquals(FinalApplicationStatus.SUCCEEDED,
      attemptState0.getFinalApplicationStatus());
  Assert.assertEquals("trackingUrl", attemptState0.getFinalTrackingUrl());
  Assert.assertEquals(app0.getFinishTime(), appState.getFinishTime());

  // A restarted RM serves the recovered report with the same values.
  MockRM rm2 = new MockRM(conf, memStore);
  rm2.start();
  ApplicationReport appReport = verifyAppReportAfterRMRestart(app0, rm2);
  Assert.assertEquals(FinalApplicationStatus.SUCCEEDED,
      appReport.getFinalApplicationStatus());
  Assert.assertEquals("trackingUrl", appReport.getOriginalTrackingUrl());
  rm1.stop();
  rm2.stop();
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test(timeout=60000) public void testDelegationTokenRestoredInDelegationTokenRenewer() throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,2);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(conf);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
RMState rmState=memStore.getState();
Map rmAppState=rmState.getApplicationState();
MockRM rm1=new TestSecurityMockRM(conf,memStore);
rm1.start();
HashSet> tokenSet=new HashSet>();
Credentials ts=new Credentials();
Text userText1=new Text("user1");
RMDelegationTokenIdentifier dtId1=new RMDelegationTokenIdentifier(userText1,new Text("renewer1"),userText1);
Token token1=new Token(dtId1,rm1.getRMContext().getRMDelegationTokenSecretManager());
SecurityUtil.setTokenService(token1,rmAddr);
ts.addToken(userText1,token1);
tokenSet.add(token1);
Text userText2=new Text("user2");
RMDelegationTokenIdentifier dtId2=new RMDelegationTokenIdentifier(userText2,new Text("renewer2"),userText2);
Token token2=new Token(dtId2,rm1.getRMContext().getRMDelegationTokenSecretManager());
SecurityUtil.setTokenService(token2,rmAddr);
ts.addToken(userText2,token2);
tokenSet.add(token2);
RMApp app=rm1.submitApp(200,"name","user",new HashMap(),false,"default",1,ts);
ApplicationState appState=rmAppState.get(app.getApplicationId());
Assert.assertNotNull(appState);
Assert.assertEquals(tokenSet,rm1.getRMContext().getDelegationTokenRenewer().getDelegationTokens());
DataOutputBuffer dob=new DataOutputBuffer();
ts.writeTokenStorageToStream(dob);
ByteBuffer securityTokens=ByteBuffer.wrap(dob.getData(),0,dob.getLength());
securityTokens.rewind();
Assert.assertEquals(securityTokens,appState.getApplicationSubmissionContext().getAMContainerSpec().getTokens());
MockRM rm2=new TestSecurityMockRM(conf,memStore);
rm2.start();
waitForTokensToBeRenewed(rm2);
Assert.assertEquals(tokenSet,rm2.getRMContext().getDelegationTokenRenewer().getDelegationTokens());
rm1.stop();
rm2.stop();
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Runs one finished, one failed, and one killed application on rm1, restarts
 * as rm2, and verifies that getApplications() returns all three with the
 * correct terminal YarnApplicationState, that filtering by application type
 * works, and that the recovered apps are logged via logApplicationSummary.
 */
@Test(timeout=60000) public void testRMRestartGetApplicationList() throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
// app0: completes successfully.
RMApp app0=rm1.submitApp(200,"name","user",null,false,"default",1,null,"myType");
MockAM am0=launchAM(app0,rm1,nm1);
finishApplicationMaster(app0,rm1,nm1,am0);
// app1: AM container completes unexpectedly -> attempt and app FAIL
// (max attempts is 1).
RMApp app1=rm1.submitApp(200,"name","user",null,false,"default",1,null,"myType");
MockAM am1=launchAM(app1,rm1,nm1);
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am1.waitForState(RMAppAttemptState.FAILED);
rm1.waitForState(app1.getApplicationId(),RMAppState.FAILED);
// app2: killed by the client.
RMApp app2=rm1.submitApp(200,"name","user",null,false,"default",1,null,"myType");
MockAM am2=launchAM(app2,rm1,nm1);
rm1.killApp(app2.getApplicationId());
rm1.waitForState(app2.getApplicationId(),RMAppState.KILLED);
rm1.waitForState(am2.getApplicationAttemptId(),RMAppAttemptState.KILLED);
// Spy on the app manager so logApplicationSummary calls can be verified.
MockRM rm2=new MockRM(conf,memStore){
@Override protected RMAppManager createRMAppManager(){
return spy(super.createRMAppManager());
}
};
rm2.start();
GetApplicationsRequest request1=GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.FINISHED,YarnApplicationState.KILLED,YarnApplicationState.FAILED));
GetApplicationsResponse response1=rm2.getClientRMService().getApplications(request1);
// Type parameter restored: iterating a raw List as ApplicationReport does
// not compile.
List<ApplicationReport> appList1=response1.getApplicationList();
boolean forApp0=false, forApp1=false, forApp2=false;
for ( ApplicationReport report : appList1) {
if (report.getApplicationId().equals(app0.getApplicationId())) {
Assert.assertEquals(YarnApplicationState.FINISHED,report.getYarnApplicationState());
forApp0=true;
}
if (report.getApplicationId().equals(app1.getApplicationId())) {
Assert.assertEquals(YarnApplicationState.FAILED,report.getYarnApplicationState());
forApp1=true;
}
if (report.getApplicationId().equals(app2.getApplicationId())) {
Assert.assertEquals(YarnApplicationState.KILLED,report.getYarnApplicationState());
forApp2=true;
}
}
Assert.assertTrue(forApp0 && forApp1 && forApp2);
// Filtering by application type must also return all three apps.
Set<String> appTypes=new HashSet<String>();
appTypes.add("myType");
GetApplicationsRequest request2=GetApplicationsRequest.newInstance(appTypes);
GetApplicationsResponse response2=rm2.getClientRMService().getApplications(request2);
List<ApplicationReport> appList2=response2.getApplicationList();
// assertEquals gives a clearer failure message than assertTrue(3 == size).
Assert.assertEquals(3,appList2.size());
verify(rm2.getRMAppManager(),times(3)).logApplicationSummary(isA(ApplicationId.class));
rm1.stop();
rm2.stop();
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies max-attempt handling across restart: app1 is submitted with a
 * single allowed attempt, app2 with -1 (meaning "use the RM default"). After
 * restart with a short AM expiry, app1's only recovered attempt expires and
 * the app fails, while app2 (still within its attempt budget) stays ACCEPTED.
 */
@Test(timeout=60000) public void testRMRestartOnMaxAppAttempts() throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
RMState rmState=memStore.getState();
// Type parameters restored: raw Map.get() returns Object, which does not
// compile when assigned to ApplicationState below.
Map<ApplicationId,ApplicationState> rmAppState=rmState.getApplicationState();
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
// app1: exactly one attempt allowed; app2: -1 -> resolved to the default.
RMApp app1=rm1.submitApp(200,"name","user",new HashMap(),false,"default",1,null);
RMApp app2=rm1.submitApp(200,"name","user",new HashMap(),false,"default",-1,null);
ApplicationState appState=rmAppState.get(app1.getApplicationId());
Assert.assertNotNull(appState);
// Submission is stored before any attempt exists.
Assert.assertEquals(0,appState.getAttemptCount());
Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),app1.getApplicationSubmissionContext().getApplicationId());
// Let the scheduler allocate app1's AM container; the attempt is then
// recorded in the store with its master container.
nm1.nodeHeartbeat(true);
RMAppAttempt attempt=app1.getCurrentAppAttempt();
ApplicationAttemptId attemptId1=attempt.getAppAttemptId();
rm1.waitForState(attemptId1,RMAppAttemptState.ALLOCATED);
Assert.assertEquals(1,appState.getAttemptCount());
ApplicationAttemptState attemptState=appState.getAttempt(attemptId1);
Assert.assertNotNull(attemptState);
Assert.assertEquals(BuilderUtils.newContainerId(attemptId1,1),attemptState.getMasterContainer().getId());
// Short AM expiry so app1's recovered attempt times out quickly on rm2.
conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS,3000);
MockRM rm2=new MockRM(conf,memStore);
rm2.start();
// app2's -1 must have been resolved to the default (2) during recovery.
Assert.assertEquals(2,rm2.getRMContext().getRMApps().get(app2.getApplicationId()).getMaxAppAttempts());
Assert.assertEquals(2,rm2.getRMContext().getRMApps().size());
rm2.waitForState(app1.getApplicationId(),RMAppState.FAILED);
rm2.waitForState(app2.getApplicationId(),RMAppState.ACCEPTED);
// Only terminal states are written back; app2 has none yet.
Assert.assertEquals(RMAppState.FAILED,rmAppState.get(app1.getApplicationId()).getState());
Assert.assertNull(rmAppState.get(app2.getApplicationId()).getState());
rm1.stop();
rm2.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Multi-restart scenario: each restarted RM must wait for the previous AM
 * attempt to finish before launching a new one. Runs four RM generations
 * against the same MemoryRMStateStore and checks attempt counts and states
 * after every recovery.
 */
@Test(timeout=60000) public void testRMRestartWaitForPreviousAMToFinish() throws Exception {
YarnConfiguration conf=new YarnConfiguration(this.conf);
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,40);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
RMState rmState=memStore.getState();
// Type parameters restored on the previously raw Map declaration.
Map<ApplicationId,ApplicationState> rmAppState=rmState.getApplicationState();
final MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",16382,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app1=rm1.submitApp(200);
rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
MockAM am1=launchAM(app1,rm1,nm1);
// Fail attempt 1, then launch attempt 2 on the same app.
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am1.waitForState(RMAppAttemptState.FAILED);
MockAM am2=launchAM(app1,rm1,nm1);
Assert.assertEquals(1,rmAppState.size());
// Argument order fixed: JUnit's assertEquals takes (expected, actual).
Assert.assertEquals(RMAppState.RUNNING,app1.getState());
Assert.assertEquals(RMAppAttemptState.RUNNING,app1.getAppAttempts().get(app1.getCurrentAppAttempt().getAppAttemptId()).getAppAttemptState());
// --- second RM generation ---
MockRM rm2=null;
rm2=new MockRM(conf,memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
NodeHeartbeatResponse res=nm1.nodeHeartbeat(true);
Assert.assertEquals(NodeAction.RESYNC,res.getNodeAction());
RMApp rmApp=rm2.getRMContext().getRMApps().get(app1.getApplicationId());
rm2.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
Assert.assertEquals(RMAppState.ACCEPTED,rmApp.getState());
Assert.assertEquals(2,rmApp.getAppAttempts().size());
rm2.waitForState(am1.getApplicationAttemptId(),RMAppAttemptState.FAILED);
rm2.waitForState(am2.getApplicationAttemptId(),RMAppAttemptState.LAUNCHED);
Assert.assertEquals(RMAppAttemptState.FAILED,rmApp.getAppAttempts().get(am1.getApplicationAttemptId()).getAppAttemptState());
Assert.assertEquals(RMAppAttemptState.LAUNCHED,rmApp.getAppAttempts().get(am2.getApplicationAttemptId()).getAppAttemptState());
// NM reports attempt 2's AM container as complete; a third attempt can
// then be launched.
NMContainerStatus status=TestRMRestart.createNMContainerStatus(am2.getApplicationAttemptId(),1,ContainerState.COMPLETE);
nm1.registerNode(Arrays.asList(status),null);
rm2.waitForState(am2.getApplicationAttemptId(),RMAppAttemptState.FAILED);
launchAM(rmApp,rm2,nm1);
Assert.assertEquals(3,rmApp.getAppAttempts().size());
rm2.waitForState(rmApp.getCurrentAppAttempt().getAppAttemptId(),RMAppAttemptState.RUNNING);
// --- third RM generation (AM expiry shortened so attempt 3 times out) ---
conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS,10000);
MockRM rm3=null;
rm3=new MockRM(conf,memStore);
rm3.start();
nm1.setResourceTrackerService(rm3.getResourceTrackerService());
rmApp=rm3.getRMContext().getRMApps().get(app1.getApplicationId());
rm3.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
Assert.assertEquals(RMAppState.ACCEPTED,rmApp.getState());
Assert.assertEquals(3,rmApp.getAppAttempts().size());
rm3.waitForState(am1.getApplicationAttemptId(),RMAppAttemptState.FAILED);
rm3.waitForState(am2.getApplicationAttemptId(),RMAppAttemptState.FAILED);
ApplicationAttemptId latestAppAttemptId=rmApp.getCurrentAppAttempt().getAppAttemptId();
rm3.waitForState(latestAppAttemptId,RMAppAttemptState.LAUNCHED);
Assert.assertEquals(RMAppAttemptState.FAILED,rmApp.getAppAttempts().get(am1.getApplicationAttemptId()).getAppAttemptState());
Assert.assertEquals(RMAppAttemptState.FAILED,rmApp.getAppAttempts().get(am2.getApplicationAttemptId()).getAppAttemptState());
Assert.assertEquals(RMAppAttemptState.LAUNCHED,rmApp.getAppAttempts().get(latestAppAttemptId).getAppAttemptState());
// Attempt 3 expires; a fourth attempt is created automatically.
rm3.waitForState(latestAppAttemptId,RMAppAttemptState.FAILED);
rm3.waitForState(rmApp.getApplicationId(),RMAppState.ACCEPTED);
Assert.assertEquals(4,rmApp.getAppAttempts().size());
Assert.assertEquals(RMAppAttemptState.FAILED,rmApp.getAppAttempts().get(latestAppAttemptId).getAppAttemptState());
latestAppAttemptId=rmApp.getCurrentAppAttempt().getAppAttemptId();
// A second app, submitted on rm3 only, with no attempt started yet.
RMApp app2=rm3.submitApp(200);
rm3.waitForState(app2.getApplicationId(),RMAppState.ACCEPTED);
Assert.assertEquals(1,app2.getAppAttempts().size());
Assert.assertEquals(0,memStore.getState().getApplicationState().get(app2.getApplicationId()).getAttemptCount());
// --- fourth RM generation ---
MockRM rm4=null;
rm4=new MockRM(conf,memStore);
rm4.start();
rmApp=rm4.getRMContext().getRMApps().get(app1.getApplicationId());
rm4.waitForState(rmApp.getApplicationId(),RMAppState.ACCEPTED);
int timeoutSecs=0;
// Wait condition fixed: the assertion below expects 4 recovered attempts,
// but the loop previously polled for size() != 2 and therefore always
// spun through the entire 8-second timeout.
while (rmApp.getAppAttempts().size() != 4 && timeoutSecs++ < 40) {
Thread.sleep(200);
}
Assert.assertEquals(4,rmApp.getAppAttempts().size());
Assert.assertEquals(RMAppState.ACCEPTED,rmApp.getState());
rm4.waitForState(latestAppAttemptId,RMAppAttemptState.SCHEDULED);
Assert.assertEquals(RMAppAttemptState.SCHEDULED,rmApp.getAppAttempts().get(latestAppAttemptId).getAppAttemptState());
// app2 is recovered with its single attempt and gets scheduled as well.
app2=rm4.getRMContext().getRMApps().get(app2.getApplicationId());
rm4.waitForState(app2.getApplicationId(),RMAppState.ACCEPTED);
Assert.assertEquals(RMAppState.ACCEPTED,app2.getState());
Assert.assertEquals(1,app2.getAppAttempts().size());
rm4.waitForState(app2.getCurrentAppAttempt().getAppAttemptId(),RMAppAttemptState.SCHEDULED);
Assert.assertEquals(RMAppAttemptState.SCHEDULED,app2.getCurrentAppAttempt().getAppAttemptState());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that per-attempt security material (client-to-AM token master key
 * and AMRM token) is saved with the attempt state and restored into the
 * secret managers of a restarted (secure) RM.
 */
@Test(timeout=60000) public void testAppAttemptTokensRestoredOnRMRestart() throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,2);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(conf);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
RMState rmState=memStore.getState();
// Type parameters restored: raw Map.get() returns Object, which does not
// compile when assigned to ApplicationState below.
Map<ApplicationId,ApplicationState> rmAppState=rmState.getApplicationState();
MockRM rm1=new TestSecurityMockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("0.0.0.0:4321",15120,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app1=rm1.submitApp(200,"name","user",new HashMap(),"default");
ApplicationState appState=rmAppState.get(app1.getApplicationId());
Assert.assertNotNull(appState);
// Drive the attempt to ALLOCATED so its state (incl. master container)
// is written to the store.
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1=app1.getCurrentAppAttempt();
ApplicationAttemptId attemptId1=attempt1.getAppAttemptId();
rm1.waitForState(attemptId1,RMAppAttemptState.ALLOCATED);
ApplicationAttemptState attemptState=appState.getAttempt(attemptId1);
Assert.assertNotNull(attemptState);
Assert.assertEquals(BuilderUtils.newContainerId(attemptId1,1),attemptState.getMasterContainer().getId());
// The client token master key must have been persisted with the attempt.
byte[] clientTokenMasterKey=attempt1.getClientTokenMasterKey().getEncoded();
Credentials savedCredentials=attemptState.getAppAttemptCredentials();
Assert.assertArrayEquals("client token master key not saved",clientTokenMasterKey,savedCredentials.getSecretKey(RMStateStore.AM_CLIENT_TOKEN_MASTER_KEY_NAME));
// Restart and check both secret managers were repopulated.
MockRM rm2=new TestSecurityMockRM(conf,memStore);
rm2.start();
RMApp loadedApp1=rm2.getRMContext().getRMApps().get(app1.getApplicationId());
RMAppAttempt loadedAttempt1=loadedApp1.getRMAppAttempt(attemptId1);
Assert.assertNotNull(loadedAttempt1);
Assert.assertEquals("client token master key not restored",attempt1.getClientTokenMasterKey(),loadedAttempt1.getClientTokenMasterKey());
Assert.assertArrayEquals(clientTokenMasterKey,rm2.getClientToAMTokenSecretManager().getMasterKey(attemptId1).getEncoded());
// Type parameter restored: with a raw Token, decodeIdentifier() returns
// TokenIdentifier and the retrievePassword call does not compile.
Token<AMRMTokenIdentifier> amrmToken=loadedAttempt1.getAMRMToken();
Assert.assertArrayEquals(amrmToken.getPassword(),rm2.getRMContext().getAMRMTokenSecretManager().retrievePassword(amrmToken.decodeIdentifier()));
rm1.stop();
rm2.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that a killed application's terminal state (app and attempt) is
 * persisted, and that after restart the RM recovers it as KILLED with a
 * single attempt and the original diagnostics in the report.
 */
@Test(timeout=60000) public void testRMRestartKilledApp() throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
RMState rmState=memStore.getState();
// Type parameters restored: raw Map.get() returns Object, which does not
// compile when assigned to ApplicationState below.
Map<ApplicationId,ApplicationState> rmAppState=rmState.getApplicationState();
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
// Launch an app and kill it from the client side.
RMApp app0=rm1.submitApp(200);
MockAM am0=launchAM(app0,rm1,nm1);
rm1.killApp(app0.getApplicationId());
rm1.waitForState(app0.getApplicationId(),RMAppState.KILLED);
rm1.waitForState(am0.getApplicationAttemptId(),RMAppAttemptState.KILLED);
// KILLED must be recorded for both the app and its attempt.
ApplicationState appState=rmAppState.get(app0.getApplicationId());
Assert.assertEquals(RMAppState.KILLED,appState.getState());
Assert.assertEquals(RMAppAttemptState.KILLED,appState.getAttempt(am0.getApplicationAttemptId()).getState());
// Restart and verify recovery of the killed app.
MockRM rm2=new MockRM(conf,memStore);
rm2.start();
RMApp loadedApp0=rm2.getRMContext().getRMApps().get(app0.getApplicationId());
rm2.waitForState(app0.getApplicationId(),RMAppState.KILLED);
rm2.waitForState(am0.getApplicationAttemptId(),RMAppAttemptState.KILLED);
Assert.assertEquals(1,loadedApp0.getAppAttempts().size());
ApplicationReport appReport=verifyAppReportAfterRMRestart(app0,rm2);
Assert.assertEquals(app0.getDiagnostics().toString(),appReport.getDiagnostics());
rm1.stop();
rm2.stop();
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that RM delegation tokens, master keys and the DT sequence number
 * are persisted in the state store, survive RM restart, and that renew and
 * cancel operations on the restarted RM keep store and secret manager in
 * sync.
 */
@Test(timeout=60000) public void testRMDelegationTokenRestoredOnRMRestart() throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,2);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
conf.set(YarnConfiguration.RM_ADDRESS,"localhost:8032");
UserGroupInformation.setConfiguration(conf);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
RMState rmState=memStore.getState();
// Type parameters restored on previously raw declarations; in particular
// a raw Map.get() below would not compile when assigned to
// ApplicationState / Long.
Map<ApplicationId,ApplicationState> rmAppState=rmState.getApplicationState();
Map<RMDelegationTokenIdentifier,Long> rmDTState=rmState.getRMDTSecretManagerState().getTokenState();
Set rmDTMasterKeyState=rmState.getRMDTSecretManagerState().getMasterKeyState();
MockRM rm1=new TestSecurityMockRM(conf,memStore);
rm1.start();
// Obtain a delegation token for "renewer1" and submit an app with it.
Credentials ts=new Credentials();
GetDelegationTokenRequest request1=GetDelegationTokenRequest.newInstance("renewer1");
UserGroupInformation.getCurrentUser().setAuthenticationMethod(AuthMethod.KERBEROS);
GetDelegationTokenResponse response1=rm1.getClientRMService().getDelegationToken(request1);
org.apache.hadoop.yarn.api.records.Token delegationToken1=response1.getRMDelegationToken();
Token<RMDelegationTokenIdentifier> token1=ConverterUtils.convertFromYarn(delegationToken1,rmAddr);
RMDelegationTokenIdentifier dtId1=token1.decodeIdentifier();
HashSet<RMDelegationTokenIdentifier> tokenIdentSet=new HashSet<RMDelegationTokenIdentifier>();
ts.addToken(token1.getService(),token1);
tokenIdentSet.add(dtId1);
RMApp app=rm1.submitApp(200,"name","user",new HashMap(),false,"default",1,ts);
ApplicationState appState=rmAppState.get(app.getApplicationId());
Assert.assertNotNull(appState);
// Master keys, tokens and sequence number must all be mirrored in the
// state store.
Set allKeysRM1=rm1.getRMContext().getRMDelegationTokenSecretManager().getAllMasterKeys();
Assert.assertEquals(allKeysRM1,rmDTMasterKeyState);
Map<RMDelegationTokenIdentifier,Long> allTokensRM1=rm1.getRMContext().getRMDelegationTokenSecretManager().getAllTokens();
Assert.assertEquals(tokenIdentSet,allTokensRM1.keySet());
Assert.assertEquals(allTokensRM1,rmDTState);
Assert.assertEquals(rm1.getRMContext().getRMDelegationTokenSecretManager().getLatestDTSequenceNumber(),rmState.getRMDTSecretManagerState().getDTSequenceNumber());
// A second token that is immediately cancelled must disappear from the
// store while still bumping the sequence number.
GetDelegationTokenRequest request2=GetDelegationTokenRequest.newInstance("renewer2");
GetDelegationTokenResponse response2=rm1.getClientRMService().getDelegationToken(request2);
org.apache.hadoop.yarn.api.records.Token delegationToken2=response2.getRMDelegationToken();
Token<RMDelegationTokenIdentifier> token2=ConverterUtils.convertFromYarn(delegationToken2,rmAddr);
RMDelegationTokenIdentifier dtId2=token2.decodeIdentifier();
try {
rm1.getRMContext().getRMDelegationTokenSecretManager().cancelToken(token2,UserGroupInformation.getCurrentUser().getUserName());
}
catch ( Exception e) {
Assert.fail();
}
Assert.assertEquals(rm1.getRMContext().getRMDelegationTokenSecretManager().getLatestDTSequenceNumber(),dtId2.getSequenceNumber());
Assert.assertFalse(rmDTState.containsKey(dtId2));
// Restart: the new RM must recover tokens, keys and sequence number.
MockRM rm2=new TestSecurityMockRM(conf,memStore);
rm2.start();
Map<RMDelegationTokenIdentifier,Long> allTokensRM2=rm2.getRMContext().getRMDelegationTokenSecretManager().getAllTokens();
Assert.assertEquals(allTokensRM2.keySet(),allTokensRM1.keySet());
Assert.assertTrue(rm2.getRMContext().getRMDelegationTokenSecretManager().getAllMasterKeys().containsAll(allKeysRM1));
Assert.assertEquals(rm1.getRMContext().getRMDelegationTokenSecretManager().getLatestDTSequenceNumber(),rm2.getRMContext().getRMDelegationTokenSecretManager().getLatestDTSequenceNumber());
// Renew on rm2: the stored renew date must advance.
Long renewDateBeforeRenew=allTokensRM2.get(dtId1);
try {
Thread.sleep(1);
rm2.getRMContext().getRMDelegationTokenSecretManager().renewToken(token1,"renewer1");
}
catch ( Exception e) {
Assert.fail();
}
allTokensRM2=rm2.getRMContext().getRMDelegationTokenSecretManager().getAllTokens();
Long renewDateAfterRenew=allTokensRM2.get(dtId1);
Assert.assertTrue(renewDateAfterRenew > renewDateBeforeRenew);
Assert.assertTrue(rmDTState.containsValue(renewDateAfterRenew));
Assert.assertFalse(rmDTState.containsValue(renewDateBeforeRenew));
// Cancel on rm2: the token must vanish from both manager and store.
try {
rm2.getRMContext().getRMDelegationTokenSecretManager().cancelToken(token1,UserGroupInformation.getCurrentUser().getUserName());
}
catch ( Exception e) {
Assert.fail();
}
allTokensRM2=rm2.getRMContext().getRMDelegationTokenSecretManager().getAllTokens();
Assert.assertFalse(allTokensRM2.containsKey(dtId1));
Assert.assertFalse(rmDTState.containsKey(dtId1));
rm1.stop();
rm2.stop();
}
APIUtilityVerifier BranchVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that the RM forces RMAuthenticationFilterInitializer into the
 * http filter-initializer config, regardless of what combination of filter
 * initializers the user configured. Each configuration is expected to make
 * startWepApp() throw (web app port already in use in this test setup); the
 * effective config is inspected inside the catch block.
 */
@Test(timeout=50000) public void testFilterOverrides() throws Exception {
String filterInitializerConfKey="hadoop.http.filter.initializers";
// Kerberos-style configurations, including duplicates and a mix with this
// test class itself as a (non-filter) entry.
String[] filterInitializers={AuthenticationFilterInitializer.class.getName(),RMAuthenticationFilterInitializer.class.getName(),AuthenticationFilterInitializer.class.getName() + "," + RMAuthenticationFilterInitializer.class.getName(),AuthenticationFilterInitializer.class.getName() + ", " + RMAuthenticationFilterInitializer.class.getName(),AuthenticationFilterInitializer.class.getName() + ", " + this.getClass().getName()};
for ( String filterInitializer : filterInitializers) {
resourceManager=new ResourceManager();
Configuration conf=new YarnConfiguration();
conf.set(filterInitializerConfKey,filterInitializer);
conf.set("hadoop.security.authentication","kerberos");
conf.set("hadoop.http.authentication.type","kerberos");
try {
try {
// May throw in this environment (no kerberos login available);
// that is acceptable for this test.
UserGroupInformation.setConfiguration(conf);
}
catch ( Exception e) {
LOG.info("Got expected exception");
}
resourceManager.init(conf);
// Expected to fail; the RuntimeException path below checks the
// rewritten filter config.
resourceManager.startWepApp();
}
catch ( RuntimeException e) {
String tmp=resourceManager.getConfig().get(filterInitializerConfKey);
if (filterInitializer.contains(this.getClass().getName())) {
// Non-filter entries are preserved after the RM filter.
Assert.assertEquals(RMAuthenticationFilterInitializer.class.getName() + "," + this.getClass().getName(),tmp);
}
else {
// All authentication-filter entries collapse to the RM filter.
Assert.assertEquals(RMAuthenticationFilterInitializer.class.getName(),tmp);
}
resourceManager.stop();
}
}
// Simple-auth configurations: empty and StaticUserWebFilter.
String[] simpleFilterInitializers={"",StaticUserWebFilter.class.getName()};
for ( String filterInitializer : simpleFilterInitializers) {
resourceManager=new ResourceManager();
Configuration conf=new YarnConfiguration();
conf.set(filterInitializerConfKey,filterInitializer);
try {
UserGroupInformation.setConfiguration(conf);
resourceManager.init(conf);
resourceManager.startWepApp();
}
catch ( RuntimeException e) {
String tmp=resourceManager.getConfig().get(filterInitializerConfKey);
if (filterInitializer.equals(StaticUserWebFilter.class.getName())) {
// The RM filter is prepended to the existing static filter.
Assert.assertEquals(RMAuthenticationFilterInitializer.class.getName() + "," + StaticUserWebFilter.class.getName(),tmp);
}
else {
Assert.assertEquals(RMAuthenticationFilterInitializer.class.getName(),tmp);
}
resourceManager.stop();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies NM reconnection handling: re-registering a healthy or unhealthy
 * node must not change the active-NM count, an unhealthy node stays counted
 * as unhealthy across reconnects, and re-registration with a different
 * resource updates the cluster's available memory.
 */
@Test public void testReconnectNode() throws Exception {
final DrainDispatcher dispatcher=new DrainDispatcher();
// Use a synchronous scheduler-event dispatcher and a drainable dispatcher
// so events can be flushed deterministically with dispatcher.await().
rm=new MockRM(){
@Override protected EventHandler createSchedulerEventDispatcher(){
return new SchedulerEventDispatcher(this.scheduler){
@Override public void handle( SchedulerEvent event){
scheduler.handle(event);
}
};
}
@Override protected Dispatcher createDispatcher(){
return dispatcher;
}
};
rm.start();
MockNM nm1=rm.registerNode("host1:1234",5120);
MockNM nm2=rm.registerNode("host2:5678",5120);
nm1.nodeHeartbeat(true);
nm2.nodeHeartbeat(false);
dispatcher.await();
checkUnealthyNMCount(rm,nm2,true,1);
final int expectedNMs=ClusterMetrics.getMetrics().getNumActiveNMs();
QueueMetrics metrics=rm.getResourceScheduler().getRootQueueMetrics();
// Only nm1 is healthy, so only its 5120 MB is available.
Assert.assertEquals(5120,metrics.getAvailableMB());
// Reconnect of a healthy node: NORMAL action, counts unchanged.
// assertEquals used instead of assertTrue(a.equals(b)) for clearer
// failure messages.
nm1=rm.registerNode("host1:1234",5120);
NodeHeartbeatResponse response=nm1.nodeHeartbeat(true);
Assert.assertEquals(NodeAction.NORMAL,response.getNodeAction());
dispatcher.await();
Assert.assertEquals(expectedNMs,ClusterMetrics.getMetrics().getNumActiveNMs());
checkUnealthyNMCount(rm,nm2,true,1);
// Reconnect of an unhealthy node: still NORMAL, still one unhealthy NM.
nm2=rm.registerNode("host2:5678",5120);
response=nm2.nodeHeartbeat(false);
Assert.assertEquals(NodeAction.NORMAL,response.getNodeAction());
dispatcher.await();
Assert.assertEquals(expectedNMs,ClusterMetrics.getMetrics().getNumActiveNMs());
checkUnealthyNMCount(rm,nm2,true,1);
// The unhealthy node reconnects and becomes healthy: its memory is added.
nm2=rm.registerNode("host2:5678",5120);
dispatcher.await();
response=nm2.nodeHeartbeat(true);
response=nm2.nodeHeartbeat(true);
dispatcher.await();
Assert.assertEquals(5120 + 5120,metrics.getAvailableMB());
// Reconnect with a different resource capability: available MB updates.
nm1=rm.registerNode("host2:5678",10240);
dispatcher.await();
response=nm1.nodeHeartbeat(true);
dispatcher.await();
Assert.assertEquals(NodeAction.NORMAL,response.getNodeAction());
Assert.assertEquals(5120 + 10240,metrics.getAvailableMB());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that containers reported by an NM for an already-completed
 * application are NOT recovered into the scheduler after RM restart.
 */
@Test(timeout=20000) public void testContainersNotRecoveredForCompletedApps() throws Exception {
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",8192,rm1.getResourceTrackerService());
nm1.registerNode();
// Run app1 to completion on rm1.
RMApp app1=rm1.submitApp(200);
MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
MockRM.finishAMAndVerifyAppState(app1,rm1,nm1,am1);
// Restart; the NM re-registers reporting containers of the finished app.
rm2=new MockRM(conf,memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
NMContainerStatus runningContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),2,ContainerState.RUNNING);
NMContainerStatus completedContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),3,ContainerState.COMPLETE);
nm1.registerNode(Arrays.asList(runningContainer,completedContainer),null);
RMApp recoveredApp1=rm2.getRMContext().getRMApps().get(app1.getApplicationId());
assertEquals(RMAppState.FINISHED,recoveredApp1.getState());
// Give the scheduler time to (not) recover the reported containers.
Thread.sleep(3000);
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm2.getResourceScheduler();
// Neither container may exist in the scheduler for a completed app.
assertNull(scheduler.getRMContainer(runningContainer.getContainerId()));
assertNull(scheduler.getRMContainer(completedContainer.getContainerId()));
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * Verifies that the scheduler app and attempt are recovered synchronously
 * during RM restart: immediately after rm2 starts, the scheduler must
 * already know the attempt, and the NM's reported containers are recovered.
 */
@Test(timeout=20000) public void testRecoverSchedulerAppAndAttemptSynchronously() throws Exception {
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app0=rm1.submitApp(200);
MockAM am0=MockRM.launchAndRegisterAM(app0,rm1,nm1);
rm2=new MockRM(conf,memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
// Scheduler app info must be present right away (synchronous recovery);
// these calls must not NPE before the NM re-registers.
Assert.assertNotNull(rm2.getResourceScheduler().getSchedulerAppInfo(am0.getApplicationAttemptId()));
((AbstractYarnScheduler)rm2.getResourceScheduler()).getTransferredContainers(am0.getApplicationAttemptId());
// NM re-registers with the attempt's containers; both should recover.
List containers=createNMContainerStatusForApp(am0);
nm1.registerNode(containers,null);
waitForNumContainersToRecover(2,rm2,am0.getApplicationAttemptId());
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies full scheduler state recovery after RM restart: recovered
 * containers, node resource accounting, queue/app/attempt state for the
 * configured scheduler (Capacity or Fifo), headroom, and the container-id
 * epoch bump for new containers.
 */
@Test(timeout=20000) public void testSchedulerRecovery() throws Exception {
conf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS,true);
conf.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,DominantResourceCalculator.class.getName());
int containerMemory=1024;
Resource containerResource=Resource.newInstance(containerMemory,1);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",8192,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app1=rm1.submitApp(200);
MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
rm1.clearQueueMetrics(app1);
// Restart; the NM reports an AM container, one running container and one
// completed container for the recovered attempt.
rm2=new MockRM(conf,memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
RMApp recoveredApp1=rm2.getRMContext().getRMApps().get(app1.getApplicationId());
RMAppAttempt loadedAttempt1=recoveredApp1.getCurrentAppAttempt();
NMContainerStatus amContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),1,ContainerState.RUNNING);
NMContainerStatus runningContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),2,ContainerState.RUNNING);
NMContainerStatus completedContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),3,ContainerState.COMPLETE);
nm1.registerNode(Arrays.asList(amContainer,runningContainer,completedContainer),null);
// Only the two RUNNING containers are recovered.
waitForNumContainersToRecover(2,rm2,am1.getApplicationAttemptId());
Set launchedContainers=((RMNodeImpl)rm2.getRMContext().getRMNodes().get(nm1.getNodeId())).getLaunchedContainers();
assertTrue(launchedContainers.contains(amContainer.getContainerId()));
assertTrue(launchedContainers.contains(runningContainer.getContainerId()));
rm2.waitForState(nm1,amContainer.getContainerId(),RMContainerState.RUNNING);
rm2.waitForState(nm1,runningContainer.getContainerId(),RMContainerState.RUNNING);
rm2.waitForContainerToComplete(loadedAttempt1,completedContainer);
// Node-level accounting: 2 live containers consume 2 * containerResource.
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm2.getResourceScheduler();
SchedulerNode schedulerNode1=scheduler.getSchedulerNode(nm1.getNodeId());
Resource usedResources=Resources.multiply(containerResource,2);
Resource nmResource=Resource.newInstance(nm1.getMemory(),nm1.getvCores());
assertTrue(schedulerNode1.isValidContainer(amContainer.getContainerId()));
assertTrue(schedulerNode1.isValidContainer(runningContainer.getContainerId()));
assertFalse(schedulerNode1.isValidContainer(completedContainer.getContainerId()));
assertEquals(2,schedulerNode1.getNumContainers());
assertEquals(Resources.subtract(nmResource,usedResources),schedulerNode1.getAvailableResource());
assertEquals(usedResources,schedulerNode1.getUsedResource());
Resource availableResources=Resources.subtract(nmResource,usedResources);
// Type parameters restored: raw Map.get() returns Object, which does not
// compile when assigned to SchedulerApplication below.
Map<ApplicationId,SchedulerApplication> schedulerApps=((AbstractYarnScheduler)rm2.getResourceScheduler()).getSchedulerApplications();
SchedulerApplication schedulerApp=schedulerApps.get(recoveredApp1.getApplicationId());
// Queue-level checks depend on which scheduler this parameterized test
// instance runs with.
if (schedulerClass.equals(CapacityScheduler.class)) {
checkCSQueue(rm2,schedulerApp,nmResource,nmResource,usedResources,2);
}
else if (schedulerClass.equals(FifoScheduler.class)) {
checkFifoQueue(schedulerApp,usedResources,availableResources);
}
// Attempt-level checks: live containers, consumption, headroom.
SchedulerApplicationAttempt schedulerAttempt=schedulerApp.getCurrentAppAttempt();
assertTrue(schedulerAttempt.getLiveContainers().contains(scheduler.getRMContainer(amContainer.getContainerId())));
assertTrue(schedulerAttempt.getLiveContainers().contains(scheduler.getRMContainer(runningContainer.getContainerId())));
// Argument order fixed: JUnit's assertEquals takes (expected, actual).
assertEquals(usedResources,schedulerAttempt.getCurrentConsumption());
if (scheduler.getClass() != FairScheduler.class) {
assertEquals(availableResources,schedulerAttempt.getHeadroom());
}
// New container ids start in the next epoch (epoch bits shifted by 22).
assertEquals((1 << 22) + 1,schedulerAttempt.getNewContainerId());
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * Verifies that when the AM container is reported COMPLETE during recovery
 * (the AM failed while the RM was down), no containers of that failed
 * attempt are recovered -- neither those reported at recovery time nor ones
 * reported later by another NM after a new attempt has started.
 */
@Test(timeout=20000) public void testAMfailedBetweenRMRestart() throws Exception {
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",8192,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app1=rm1.submitApp(200);
MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
rm2=new MockRM(conf,memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
// The AM container is COMPLETE -> the recovered attempt fails.
NMContainerStatus amContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),1,ContainerState.COMPLETE);
NMContainerStatus runningContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),2,ContainerState.RUNNING);
NMContainerStatus completedContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),3,ContainerState.COMPLETE);
nm1.registerNode(Arrays.asList(amContainer,runningContainer,completedContainer),null);
rm2.waitForState(am1.getApplicationAttemptId(),RMAppAttemptState.FAILED);
// Give the scheduler time to (not) recover the containers.
Thread.sleep(3000);
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm2.getResourceScheduler();
// No containers of the failed attempt may exist in the scheduler.
assertNull(scheduler.getRMContainer(runningContainer.getContainerId()));
assertNull(scheduler.getRMContainer(completedContainer.getContainerId()));
rm2.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),2,nm1);
// A container of the old (failed) attempt reported by a different NM
// after the new attempt started must be ignored as well.
MockNM nm2=new MockNM("127.1.1.1:4321",8192,rm2.getResourceTrackerService());
NMContainerStatus previousAttemptContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),4,ContainerState.RUNNING);
nm2.registerNode(Arrays.asList(previousAttemptContainer),null);
Thread.sleep(3000);
assertNull(scheduler.getRMContainer(previousAttemptContainer.getContainerId()));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testAMRMUnusableNodes() throws Exception {
// Verifies that AMs are informed of node-state changes (UNHEALTHY, LOST,
// back to RUNNING) through AllocateResponse.getUpdatedNodes(), that updates
// are replayed for a stale responseId, and that each AM sees an update once.
MockNM nm1=rm.registerNode("127.0.0.1:1234",10000);
MockNM nm2=rm.registerNode("127.0.0.2:1234",10000);
MockNM nm3=rm.registerNode("127.0.0.3:1234",10000);
MockNM nm4=rm.registerNode("127.0.0.4:1234",10000);
dispatcher.await();
RMApp app1=rm.submitApp(2000);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1=app1.getCurrentAppAttempt();
MockAM am1=rm.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
// First allocate: no node transitions yet, so no updates reported.
AllocateRequest allocateRequest1=AllocateRequest.newInstance(0,0F,null,null,null);
AllocateResponse response1=allocate(attempt1.getAppAttemptId(),allocateRequest1);
List updatedNodes=response1.getUpdatedNodes();
Assert.assertEquals(0,updatedNodes.size());
// nm4 reports unhealthy; the next allocate must surface it.
syncNodeHeartbeat(nm4,false);
allocateRequest1=AllocateRequest.newInstance(response1.getResponseId(),0F,null,null,null);
response1=allocate(attempt1.getAppAttemptId(),allocateRequest1);
updatedNodes=response1.getUpdatedNodes();
Assert.assertEquals(1,updatedNodes.size());
NodeReport nr=updatedNodes.iterator().next();
Assert.assertEquals(nm4.getNodeId(),nr.getNodeId());
Assert.assertEquals(NodeState.UNHEALTHY,nr.getNodeState());
// Same request resent (stale responseId): the previous response, including
// the node update, must be replayed unchanged.
response1=allocate(attempt1.getAppAttemptId(),allocateRequest1);
updatedNodes=response1.getUpdatedNodes();
Assert.assertEquals(1,updatedNodes.size());
nr=updatedNodes.iterator().next();
Assert.assertEquals(nm4.getNodeId(),nr.getNodeId());
Assert.assertEquals(NodeState.UNHEALTHY,nr.getNodeState());
// nm3 is lost (missed heartbeats); AM must be told on the next allocate.
syncNodeLost(nm3);
allocateRequest1=AllocateRequest.newInstance(response1.getResponseId(),0F,null,null,null);
response1=allocate(attempt1.getAppAttemptId(),allocateRequest1);
updatedNodes=response1.getUpdatedNodes();
Assert.assertEquals(1,updatedNodes.size());
nr=updatedNodes.iterator().next();
Assert.assertEquals(nm3.getNodeId(),nr.getNodeId());
Assert.assertEquals(NodeState.LOST,nr.getNodeState());
// A second application registers after the transitions above; it starts with
// a clean slate and sees no historical updates.
RMApp app2=rm.submitApp(2000);
nm2.nodeHeartbeat(true);
RMAppAttempt attempt2=app2.getCurrentAppAttempt();
MockAM am2=rm.sendAMLaunched(attempt2.getAppAttemptId());
am2.registerAppAttempt();
AllocateRequest allocateRequest2=AllocateRequest.newInstance(0,0F,null,null,null);
AllocateResponse response2=allocate(attempt2.getAppAttemptId(),allocateRequest2);
updatedNodes=response2.getUpdatedNodes();
Assert.assertEquals(0,updatedNodes.size());
// nm4 becomes healthy again; both running AMs must be notified.
syncNodeHeartbeat(nm4,true);
allocateRequest1=AllocateRequest.newInstance(response1.getResponseId(),0F,null,null,null);
response1=allocate(attempt1.getAppAttemptId(),allocateRequest1);
updatedNodes=response1.getUpdatedNodes();
Assert.assertEquals(1,updatedNodes.size());
nr=updatedNodes.iterator().next();
Assert.assertEquals(nm4.getNodeId(),nr.getNodeId());
Assert.assertEquals(NodeState.RUNNING,nr.getNodeState());
allocateRequest2=AllocateRequest.newInstance(response2.getResponseId(),0F,null,null,null);
response2=allocate(attempt2.getAppAttemptId(),allocateRequest2);
updatedNodes=response2.getUpdatedNodes();
Assert.assertEquals(1,updatedNodes.size());
nr=updatedNodes.iterator().next();
Assert.assertEquals(nm4.getNodeId(),nr.getNodeId());
Assert.assertEquals(NodeState.RUNNING,nr.getNodeState());
// Each update is delivered once per AM: the next allocate is empty again.
allocateRequest2=AllocateRequest.newInstance(response2.getResponseId(),0F,null,null,null);
response2=allocate(attempt2.getAppAttemptId(),allocateRequest2);
updatedNodes=response2.getUpdatedNodes();
Assert.assertEquals(0,updatedNodes.size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testARRMResponseId() throws Exception {
// Verifies the allocate-protocol response-id handshake between AM and RM:
// the responseId increments on each successful allocate, a duplicate request
// carrying the previous responseId is answered with the cached response, and
// an out-of-sync responseId triggers an AM_RESYNC command.
MockNM nm1=rm.registerNode("h1:1234",5000);
RMApp app=rm.submitApp(2000);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt=app.getCurrentAppAttempt();
MockAM am=rm.sendAMLaunched(attempt.getAppAttemptId());
am.registerAppAttempt();
AllocateRequest allocateRequest=AllocateRequest.newInstance(0,0F,null,null,null);
AllocateResponse response=allocate(attempt.getAppAttemptId(),allocateRequest);
Assert.assertEquals(1,response.getResponseId());
// assertNull instead of assertTrue(x == null): same check, clearer failure.
Assert.assertNull(response.getAMCommand());
allocateRequest=AllocateRequest.newInstance(response.getResponseId(),0F,null,null,null);
response=allocate(attempt.getAppAttemptId(),allocateRequest);
Assert.assertEquals(2,response.getResponseId());
// Re-sending the identical request must return the cached response (id 2).
response=allocate(attempt.getAppAttemptId(),allocateRequest);
Assert.assertEquals(2,response.getResponseId());
// A responseId of 0 is now out of sync; the RM asks the AM to resync.
allocateRequest=AllocateRequest.newInstance(0,0F,null,null,null);
response=allocate(attempt.getAppAttemptId(),allocateRequest);
// assertEquals instead of assertTrue(x == ENUM): reports both values on failure.
Assert.assertEquals(AMCommand.AM_RESYNC,response.getAMCommand());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test(timeout=30000) public void testAMRestartWithExistingContainers() throws Exception {
// Verifies "work-preserving AM restart": when an AM fails and the app keeps
// containers across attempts, RUNNING containers survive into the new attempt
// (reported via RegisterApplicationMasterResponse), while ACQUIRED, ALLOCATED
// and RESERVED containers of the old attempt are cleaned up and reported as
// just-finished to the new attempt.
YarnConfiguration conf=new YarnConfiguration();
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,2);
MockRM rm1=new MockRM(conf);
rm1.start();
// keepContainers=true (last boolean): containers are kept across AM attempts.
RMApp app1=rm1.submitApp(200,"name","user",new HashMap(),false,"default",-1,null,"MAPREDUCE",false,true);
MockNM nm1=new MockNM("127.0.0.1:1234",10240,rm1.getResourceTrackerService());
nm1.registerNode();
// nm2 only contributes cluster capacity; no containers are placed on it here.
MockNM nm2=new MockNM("127.0.0.1:2351",4089,rm1.getResourceTrackerService());
nm2.registerNode();
MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
int NUM_CONTAINERS=3;
am1.allocate("127.0.0.1",1024,NUM_CONTAINERS,new ArrayList());
nm1.nodeHeartbeat(true);
// Poll until all three requested containers have been allocated.
List containers=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
while (containers.size() != NUM_CONTAINERS) {
nm1.nodeHeartbeat(true);
containers.addAll(am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers());
Thread.sleep(200);
}
// Containers 2 and 3 are launched (RUNNING); container 4 stays ACQUIRED.
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),2,ContainerState.RUNNING);
ContainerId containerId2=ContainerId.newInstance(am1.getApplicationAttemptId(),2);
rm1.waitForState(nm1,containerId2,RMContainerState.RUNNING);
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),3,ContainerState.RUNNING);
ContainerId containerId3=ContainerId.newInstance(am1.getApplicationAttemptId(),3);
rm1.waitForState(nm1,containerId3,RMContainerState.RUNNING);
ContainerId containerId4=ContainerId.newInstance(am1.getApplicationAttemptId(),4);
rm1.waitForState(nm1,containerId4,RMContainerState.ACQUIRED);
// Container 5: allocated by the scheduler but never acquired by the AM.
am1.allocate("127.0.0.1",1024,1,new ArrayList());
nm1.nodeHeartbeat(true);
ContainerId containerId5=ContainerId.newInstance(am1.getApplicationAttemptId(),5);
rm1.waitForContainerAllocated(nm1,containerId5);
rm1.waitForState(nm1,containerId5,RMContainerState.ALLOCATED);
// Container 6: a 6000MB request that cannot fit, so it ends up RESERVED.
am1.allocate("127.0.0.1",6000,1,new ArrayList());
ContainerId containerId6=ContainerId.newInstance(am1.getApplicationAttemptId(),6);
nm1.nodeHeartbeat(true);
SchedulerApplicationAttempt schedulerAttempt=((AbstractYarnScheduler)rm1.getResourceScheduler()).getCurrentAttemptForContainer(containerId6);
while (schedulerAttempt.getReservedContainers().isEmpty()) {
System.out.println("Waiting for container " + containerId6 + " to be reserved.");
nm1.nodeHeartbeat(true);
Thread.sleep(200);
}
Assert.assertEquals(containerId6,schedulerAttempt.getReservedContainers().get(0).getContainerId());
// Fail the AM by completing its container (container 1).
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am1.waitForState(RMAppAttemptState.FAILED);
Thread.sleep(3000);
// RUNNING container 2 survives the attempt failure; ACQUIRED/ALLOCATED
// containers 4 and 5 are killed and removed from the scheduler.
rm1.waitForState(nm1,containerId2,RMContainerState.RUNNING);
Assert.assertNull(rm1.getResourceScheduler().getRMContainer(containerId4));
Assert.assertNull(rm1.getResourceScheduler().getRMContainer(containerId5));
rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
ApplicationAttemptId newAttemptId=app1.getCurrentAppAttempt().getAppAttemptId();
Assert.assertFalse(newAttemptId.equals(am1.getApplicationAttemptId()));
RMAppAttempt attempt2=app1.getCurrentAppAttempt();
nm1.nodeHeartbeat(true);
MockAM am2=rm1.sendAMLaunched(attempt2.getAppAttemptId());
RegisterApplicationMasterResponse registerResponse=am2.registerAppAttempt();
// The new AM is handed exactly the two still-running containers (2 and 3).
Assert.assertEquals(2,registerResponse.getContainersFromPreviousAttempts().size());
boolean containerId2Exists=false, containerId3Exists=false;
for ( Container container : registerResponse.getContainersFromPreviousAttempts()) {
if (container.getId().equals(containerId2)) {
containerId2Exists=true;
}
if (container.getId().equals(containerId3)) {
containerId3Exists=true;
}
}
Assert.assertTrue(containerId2Exists && containerId3Exists);
rm1.waitForState(app1.getApplicationId(),RMAppState.RUNNING);
// Complete container 3; the new attempt should accumulate 4 finished
// containers: 3 (just completed) plus 4, 5 and 6 (killed on attempt failure).
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),3,ContainerState.COMPLETE);
RMAppAttempt newAttempt=app1.getRMAppAttempt(am2.getApplicationAttemptId());
waitForContainersToFinish(4,newAttempt);
boolean container3Exists=false, container4Exists=false, container5Exists=false, container6Exists=false;
for ( ContainerStatus status : newAttempt.getJustFinishedContainers()) {
if (status.getContainerId().equals(containerId3)) {
container3Exists=true;
}
if (status.getContainerId().equals(containerId4)) {
container4Exists=true;
}
if (status.getContainerId().equals(containerId5)) {
container5Exists=true;
}
if (status.getContainerId().equals(containerId6)) {
container6Exists=true;
}
}
Assert.assertTrue(container3Exists && container4Exists && container5Exists&& container6Exists);
rm1.waitForState(nm1,containerId2,RMContainerState.RUNNING);
SchedulerApplicationAttempt schedulerNewAttempt=((AbstractYarnScheduler)rm1.getResourceScheduler()).getCurrentAttemptForContainer(containerId2);
// Finishing the app must also release the surviving container 2.
MockRM.finishAMAndVerifyAppState(app1,rm1,nm1,am2);
Assert.assertFalse(schedulerNewAttempt.getLiveContainers().contains(containerId2));
System.out.println("New attempt's just finished containers: " + newAttempt.getJustFinishedContainers());
waitForContainersToFinish(5,newAttempt);
rm1.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=30000) public void testNMTokensRebindOnAMRestart() throws Exception {
// Verifies that NM tokens issued to earlier AM attempts are transferred to a
// new attempt on registration (work-preserving restart), and that tokens
// accumulated across two failed attempts are all handed to the third.
YarnConfiguration conf=new YarnConfiguration();
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,3);
MockRM rm1=new MockRM(conf);
rm1.start();
// keepContainers=true (last boolean): containers survive AM restarts.
RMApp app1=rm1.submitApp(200,"myname","myuser",new HashMap(),false,"default",-1,null,"MAPREDUCE",false,true);
MockNM nm1=new MockNM("127.0.0.1:1234",8000,rm1.getResourceTrackerService());
nm1.registerNode();
MockNM nm2=new MockNM("127.1.1.1:4321",8000,rm1.getResourceTrackerService());
nm2.registerNode();
MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
List containers=new ArrayList();
List expectedNMTokens=new ArrayList();
// Attempt 1 allocates two containers on nm1, collecting the NM tokens issued.
while (true) {
AllocateResponse response=am1.allocate("127.0.0.1",2000,2,new ArrayList());
nm1.nodeHeartbeat(true);
containers.addAll(response.getAllocatedContainers());
expectedNMTokens.addAll(response.getNMTokens());
if (containers.size() == 2) {
break;
}
Thread.sleep(200);
System.out.println("Waiting for container to be allocated.");
}
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),2,ContainerState.RUNNING);
ContainerId containerId2=ContainerId.newInstance(am1.getApplicationAttemptId(),2);
rm1.waitForState(nm1,containerId2,RMContainerState.RUNNING);
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),3,ContainerState.RUNNING);
ContainerId containerId3=ContainerId.newInstance(am1.getApplicationAttemptId(),3);
rm1.waitForState(nm1,containerId3,RMContainerState.RUNNING);
// Fail attempt 1 by completing its AM container.
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am1.waitForState(RMAppAttemptState.FAILED);
rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
MockAM am2=MockRM.launchAM(app1,rm1,nm1);
RegisterApplicationMasterResponse registerResponse=am2.registerAppAttempt();
rm1.waitForState(app1.getApplicationId(),RMAppState.RUNNING);
// Attempt 2 receives all NM tokens issued to attempt 1.
Assert.assertEquals(expectedNMTokens,registerResponse.getNMTokensFromPreviousAttempts());
containers=new ArrayList();
// Attempt 2 allocates one container on nm2, adding a token for the new node.
while (true) {
AllocateResponse allocateResponse=am2.allocate("127.1.1.1",4000,1,new ArrayList());
nm2.nodeHeartbeat(true);
containers.addAll(allocateResponse.getAllocatedContainers());
expectedNMTokens.addAll(allocateResponse.getNMTokens());
if (containers.size() == 1) {
break;
}
Thread.sleep(200);
System.out.println("Waiting for container to be allocated.");
}
nm1.nodeHeartbeat(am2.getApplicationAttemptId(),2,ContainerState.RUNNING);
ContainerId am2ContainerId2=ContainerId.newInstance(am2.getApplicationAttemptId(),2);
rm1.waitForState(nm1,am2ContainerId2,RMContainerState.RUNNING);
// Fail attempt 2 the same way.
nm1.nodeHeartbeat(am2.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am2.waitForState(RMAppAttemptState.FAILED);
rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
MockAM am3=MockRM.launchAM(app1,rm1,nm1);
registerResponse=am3.registerAppAttempt();
rm1.waitForState(app1.getApplicationId(),RMAppState.RUNNING);
// Attempt 3 receives the union of tokens from both previous attempts
// (one per NM, hence size 2).
List transferredTokens=registerResponse.getNMTokensFromPreviousAttempts();
Assert.assertEquals(2,transferredTokens.size());
Assert.assertTrue(transferredTokens.containsAll(expectedNMTokens));
rm1.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=20000) public void testPreemptedAMRestartOnRMRestart() throws Exception {
// Verifies that an AM container preempted by the scheduler does not count
// towards the max-attempt limit (RM_AM_MAX_ATTEMPTS=1), and that the
// PREEMPTED exit status is persisted so the decision survives an RM restart.
YarnConfiguration conf=new YarnConfiguration();
conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class);
conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED,true);
conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName());
// Only one attempt allowed: the preempted attempt must not consume it.
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",8000,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app1=rm1.submitApp(200);
RMAppAttempt attempt1=app1.getCurrentAppAttempt();
MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
CapacityScheduler scheduler=(CapacityScheduler)rm1.getResourceScheduler();
ContainerId amContainer=ContainerId.newInstance(am1.getApplicationAttemptId(),1);
// Simulate preemption by killing the AM container from the scheduler side.
scheduler.killContainer(scheduler.getRMContainer(amContainer));
am1.waitForState(RMAppAttemptState.FAILED);
Assert.assertTrue(!attempt1.shouldCountTowardsMaxAttemptRetry());
rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
ApplicationState appState=memStore.getState().getApplicationState().get(app1.getApplicationId());
Assert.assertEquals(1,appState.getAttemptCount());
// The PREEMPTED exit status must have been persisted for the failed attempt.
Assert.assertEquals(ContainerExitStatus.PREEMPTED,appState.getAttempt(am1.getApplicationAttemptId()).getAMContainerExitStatus());
// Restart the RM from the same state store and re-register the NM.
MockRM rm2=new MockRM(conf,memStore);
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
// Fix: start rm2 BEFORE the NM re-registers. Registering against an
// unstarted RM is racy because its ResourceTrackerService is not serving
// yet; every other restart test in this file starts the RM first.
rm2.start();
nm1.registerNode();
// A second attempt is launched despite RM_AM_MAX_ATTEMPTS=1, proving the
// preempted attempt was not counted.
MockAM am2=rm2.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),2,nm1);
MockRM.finishAMAndVerifyAppState(app1,rm2,nm1,am2);
RMAppAttempt attempt2=rm2.getRMContext().getRMApps().get(app1.getApplicationId()).getCurrentAppAttempt();
// The normally finishing attempt does count towards retries.
Assert.assertTrue(attempt2.shouldCountTowardsMaxAttemptRetry());
Assert.assertEquals(ContainerExitStatus.INVALID,appState.getAttempt(am2.getApplicationAttemptId()).getAMContainerExitStatus());
rm1.stop();
rm2.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=50000) public void testRMRestartOrFailoverNotCountedForAMFailures() throws Exception {
// Verifies that an AM container killed by the RM itself (exit status
// KILLED_BY_RESOURCEMANAGER, as happens across restart/failover) does not
// count towards RM_AM_MAX_ATTEMPTS, so a new attempt can still be launched.
YarnConfiguration conf=new YarnConfiguration();
conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class);
conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED,true);
conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName());
// Only one attempt allowed: the RM-killed attempt must not consume it.
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",8000,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app1=rm1.submitApp(200);
MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
RMAppAttempt attempt1=app1.getCurrentAppAttempt();
// With max-attempts=1 the first attempt believes it may be the last one.
Assert.assertTrue(((RMAppAttemptImpl)attempt1).mayBeLastAttempt());
// Restart the RM from the same state store.
MockRM rm2=new MockRM(conf,memStore);
rm2.start();
ApplicationState appState=memStore.getState().getApplicationState().get(app1.getApplicationId());
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
// The NM reports the old AM container as COMPLETE with
// KILLED_BY_RESOURCEMANAGER, mimicking the kill issued during failover.
NMContainerStatus status=Records.newRecord(NMContainerStatus.class);
status.setContainerExitStatus(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER);
status.setContainerId(attempt1.getMasterContainer().getId());
status.setContainerState(ContainerState.COMPLETE);
status.setDiagnostics("");
nm1.registerNode(Collections.singletonList(status),null);
rm2.waitForState(attempt1.getAppAttemptId(),RMAppAttemptState.FAILED);
Assert.assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,appState.getAttempt(am1.getApplicationAttemptId()).getAMContainerExitStatus());
rm2.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
// A second attempt launches despite max-attempts=1: the RM-caused failure
// was not counted.
MockAM am2=rm2.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),2,nm1);
MockRM.finishAMAndVerifyAppState(app1,rm2,nm1,am2);
RMAppAttempt attempt3=rm2.getRMContext().getRMApps().get(app1.getApplicationId()).getCurrentAppAttempt();
Assert.assertTrue(attempt3.shouldCountTowardsMaxAttemptRetry());
Assert.assertEquals(ContainerExitStatus.INVALID,appState.getAttempt(am2.getApplicationAttemptId()).getAMContainerExitStatus());
rm1.stop();
rm2.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier
@Test(timeout=60000) public void testFSRMStateStore() throws Exception {
// Exercises FileSystemRMStateStore against a single-node MiniDFSCluster,
// running the shared state-store test battery. Also plants a stray ".tmp"
// attempt file to check that leftover temporary files are cleaned up.
HdfsConfiguration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
fsTester=new TestFSRMStateStoreTester(cluster);
FSDataOutputStream fsOut=null;
FileSystemRMStateStore fileSystemRMStateStore=(FileSystemRMStateStore)fsTester.getRMStateStore();
String appAttemptIdStr3="appattempt_1352994193343_0001_000003";
ApplicationAttemptId attemptId3=ConverterUtils.toApplicationAttemptId(appAttemptIdStr3);
Path appDir=fsTester.store.getAppDir(attemptId3.getApplicationId().toString());
// Create a leftover temp file as would remain after a crashed write.
Path tempAppAttemptFile=new Path(appDir,attemptId3.toString() + ".tmp");
fsOut=fileSystemRMStateStore.fs.create(tempAppAttemptFile,false);
fsOut.write("Some random data ".getBytes());
fsOut.close();
testRMAppStateStore(fsTester);
// The store must have removed the stray .tmp file during its operations.
Assert.assertFalse(fsTester.workingDirPathURI.getFileSystem(conf).exists(tempAppAttemptFile));
// Run the remaining shared state-store test battery against HDFS.
testRMDTSecretManagerStateStore(fsTester);
testCheckVersion(fsTester);
testEpoch(fsTester);
testAppDeletion(fsTester);
testDeleteStore(fsTester);
testAMRMTokenSecretManagerStateStore(fsTester);
}
finally {
// Always tear down the mini cluster, even if an assertion failed.
cluster.shutdown();
}
}
APIUtilityVerifier BooleanVerifier
@Test public void testAppSuccessPath() throws IOException {
// Drive an application through its normal lifecycle to FINISHED and check
// that the final diagnostics message was recorded on the application.
LOG.info("--- START: testAppSuccessPath ---");
final String expectedDiagnostics="some diagnostics";
RMApp finishedApp=testCreateAppFinished(null,expectedDiagnostics);
boolean diagnosticsRecorded=finishedApp.getDiagnostics().indexOf(expectedDiagnostics) >= 0;
Assert.assertTrue("Finished application missing diagnostics",diagnosticsRecorded);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testUnmanagedApp() throws IOException {
// Two scenarios for unmanaged-AM applications: (1) normal finish records the
// diagnostics; (2) a failed attempt immediately fails the whole application
// because the RM cannot relaunch an AM it does not manage.
ApplicationSubmissionContext subContext=new ApplicationSubmissionContextPBImpl();
subContext.setUnmanagedAM(true);
LOG.info("--- START: testUnmanagedAppSuccessPath ---");
final String diagMsg="some diagnostics";
RMApp application=testCreateAppFinished(subContext,diagMsg);
Assert.assertTrue("Finished app missing diagnostics",application.getDiagnostics().indexOf(diagMsg) != -1);
// Reset the history-writer mock before the failure scenario.
reset(writer);
LOG.info("--- START: testUnmanagedAppFailPath ---");
application=testCreateAppRunning(subContext);
RMAppEvent event=new RMAppFailedAttemptEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_FAILED,"",false);
application.handle(event);
rmDispatcher.await();
RMAppAttempt appAttempt=application.getCurrentAppAttempt();
// No retry for unmanaged AMs: still only the first attempt.
Assert.assertEquals(1,appAttempt.getAppAttemptId().getAttemptId());
sendAppUpdateSavedEvent(application);
assertFailed(application,".*Unmanaged application.*Failing the application.*");
assertAppFinalStateSaved(application);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testRunningToFailed(){
// RUNNING -> FINAL_SAVING -> FAILED: the AM container completes with a
// non-zero exit code; events arriving during FINAL_SAVING must be ignored,
// and after the state is saved the attempt lands in FAILED with tracking
// URLs pointing back to the RM app page.
Container amContainer=allocateApplicationAttempt();
launchApplicationAttempt(amContainer);
runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false);
String containerDiagMsg="some error";
int exitCode=123;
ContainerStatus cs=BuilderUtils.newContainerStatus(amContainer.getId(),ContainerState.COMPLETE,containerDiagMsg,exitCode);
ApplicationAttemptId appAttemptId=applicationAttempt.getAppAttemptId();
applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(appAttemptId,cs));
assertEquals(RMAppAttemptState.FINAL_SAVING,applicationAttempt.getAppAttemptState());
// A duplicate container-finished event and an EXPIRE while FINAL_SAVING
// must not change the state.
applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(applicationAttempt.getAppAttemptId(),BuilderUtils.newContainerStatus(amContainer.getId(),ContainerState.COMPLETE,"",0)));
applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.EXPIRE));
assertEquals(RMAppAttemptState.FINAL_SAVING,applicationAttempt.getAppAttemptState());
// The externally reported state while saving is still RUNNING.
assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState());
sendAttemptUpdateSavedEvent(applicationAttempt);
assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState());
assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
assertEquals(amContainer,applicationAttempt.getMasterContainer());
assertEquals(0,application.getRanNodes().size());
// After failure both tracking URLs fall back to the RM's app page.
String rmAppPageUrl=pjoin(RM_WEBAPP_ADDR,"cluster","app",applicationAttempt.getAppAttemptId().getApplicationId());
assertEquals(rmAppPageUrl,applicationAttempt.getOriginalTrackingUrl());
assertEquals(rmAppPageUrl,applicationAttempt.getTrackingUrl());
verifyAMHostAndPortInvalidated();
verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testRunningToKilled(){
// RUNNING -> FINAL_SAVING -> KILLED: a KILL event while running; events
// arriving during FINAL_SAVING are ignored, and after the saved-event the
// attempt is KILLED with tracking URLs pointing to the RM app page.
Container amContainer=allocateApplicationAttempt();
launchApplicationAttempt(amContainer);
runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false);
applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.KILL));
assertEquals(RMAppAttemptState.FINAL_SAVING,applicationAttempt.getAppAttemptState());
// Container-finished and EXPIRE while FINAL_SAVING must not change state.
applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(applicationAttempt.getAppAttemptId(),BuilderUtils.newContainerStatus(amContainer.getId(),ContainerState.COMPLETE,"",0)));
applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.EXPIRE));
assertEquals(RMAppAttemptState.FINAL_SAVING,applicationAttempt.getAppAttemptState());
// The externally reported state while saving is still RUNNING.
assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState());
sendAttemptUpdateSavedEvent(applicationAttempt);
assertEquals(RMAppAttemptState.KILLED,applicationAttempt.getAppAttemptState());
assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
assertEquals(amContainer,applicationAttempt.getMasterContainer());
assertEquals(0,application.getRanNodes().size());
// After the kill both tracking URLs fall back to the RM's app page.
String rmAppPageUrl=pjoin(RM_WEBAPP_ADDR,"cluster","app",applicationAttempt.getAppAttemptId().getApplicationId());
assertEquals(rmAppPageUrl,applicationAttempt.getOriginalTrackingUrl());
assertEquals(rmAppPageUrl,applicationAttempt.getTrackingUrl());
verifyTokenCount(applicationAttempt.getAppAttemptId(),1);
verifyAMHostAndPortInvalidated();
verifyApplicationAttemptFinished(RMAppAttemptState.KILLED);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testFailedToFailed(){
// With keep-containers enabled, a FAILED attempt must keep accepting
// container-finished events (state is transferred to the next attempt), and
// those containers must appear in its just-finished list.
when(submissionContext.getKeepContainersAcrossApplicationAttempts()).thenReturn(true);
Container amContainer=allocateApplicationAttempt();
launchApplicationAttempt(amContainer);
runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false);
// Fail the attempt by completing the AM container with a non-zero exit code.
ContainerStatus cs1=ContainerStatus.newInstance(amContainer.getId(),ContainerState.COMPLETE,"some error",123);
ApplicationAttemptId appAttemptId=applicationAttempt.getAppAttemptId();
applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(appAttemptId,cs1));
assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState());
sendAttemptUpdateSavedEvent(applicationAttempt);
assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState());
// Keep-containers implies the attempt's state transfers to the next one.
assertTrue(transferStateFromPreviousAttempt);
verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
// A container finishing AFTER the attempt failed must still be recorded.
ContainerStatus cs2=ContainerStatus.newInstance(ContainerId.newInstance(appAttemptId,2),ContainerState.COMPLETE,"",0);
applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(appAttemptId,cs2));
assertEquals(1,applicationAttempt.getJustFinishedContainers().size());
assertEquals(cs2.getContainerId(),applicationAttempt.getJustFinishedContainers().get(0).getContainerId());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=10000) public void testLaunchedExpire(){
// LAUNCHED -> (EXPIRE) -> FINAL_SAVING -> FAILED: an AM that launches but
// never registers times out; diagnostics must mention the timeout and the
// tracking URLs must fall back to the RM app page.
Container amContainer=allocateApplicationAttempt();
launchApplicationAttempt(amContainer);
applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.EXPIRE));
// The externally reported state while saving is still LAUNCHED.
assertEquals(YarnApplicationAttemptState.LAUNCHED,applicationAttempt.createApplicationAttemptState());
sendAttemptUpdateSavedEvent(applicationAttempt);
assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState());
assertTrue("expire diagnostics missing",applicationAttempt.getDiagnostics().contains("timed out"));
String rmAppPageUrl=pjoin(RM_WEBAPP_ADDR,"cluster","app",applicationAttempt.getAppAttemptId().getApplicationId());
assertEquals(rmAppPageUrl,applicationAttempt.getOriginalTrackingUrl());
assertEquals(rmAppPageUrl,applicationAttempt.getTrackingUrl());
verifyTokenCount(applicationAttempt.getAppAttemptId(),1);
verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=20000) public void testRunningExpire(){
// RUNNING -> (EXPIRE) -> FINAL_SAVING -> FAILED: a registered AM stops
// heartbeating; like the launched case, but the previously reported AM
// host/port must additionally be invalidated.
Container amContainer=allocateApplicationAttempt();
launchApplicationAttempt(amContainer);
runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false);
applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.EXPIRE));
// The externally reported state while saving is still RUNNING.
assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState());
sendAttemptUpdateSavedEvent(applicationAttempt);
assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState());
assertTrue("expire diagnostics missing",applicationAttempt.getDiagnostics().contains("timed out"));
String rmAppPageUrl=pjoin(RM_WEBAPP_ADDR,"cluster","app",applicationAttempt.getAppAttemptId().getApplicationId());
assertEquals(rmAppPageUrl,applicationAttempt.getOriginalTrackingUrl());
assertEquals(rmAppPageUrl,applicationAttempt.getTrackingUrl());
verifyTokenCount(applicationAttempt.getAppAttemptId(),1);
verifyAMHostAndPortInvalidated();
verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testReleaseWhileRunning(){
// Drives an RMContainerImpl through NEW -> ALLOCATED -> ACQUIRED -> RUNNING,
// then releases it: the container goes to RELEASED with ABORTED exit status,
// the attempt is notified via a CONTAINER_FINISHED event, and a subsequent
// FINISHED event is ignored (terminal state).
DrainDispatcher drainDispatcher=new DrainDispatcher();
EventHandler appAttemptEventHandler=mock(EventHandler.class);
EventHandler generic=mock(EventHandler.class);
drainDispatcher.register(RMAppAttemptEventType.class,appAttemptEventHandler);
drainDispatcher.register(RMNodeEventType.class,generic);
drainDispatcher.init(new YarnConfiguration());
drainDispatcher.start();
NodeId nodeId=BuilderUtils.newNodeId("host",3425);
ApplicationId appId=BuilderUtils.newApplicationId(1,1);
ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(appId,1);
ContainerId containerId=BuilderUtils.newContainerId(appAttemptId,1);
ContainerAllocationExpirer expirer=mock(ContainerAllocationExpirer.class);
Resource resource=BuilderUtils.newResource(512,1);
Priority priority=BuilderUtils.newPriority(5);
Container container=BuilderUtils.newContainer(containerId,nodeId,"host:3465",resource,priority,null);
RMApplicationHistoryWriter writer=mock(RMApplicationHistoryWriter.class);
RMContext rmContext=mock(RMContext.class);
when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
RMContainer rmContainer=new RMContainerImpl(container,appAttemptId,nodeId,"user",rmContext);
// Creation already records start in the history writer.
assertEquals(RMContainerState.NEW,rmContainer.getState());
assertEquals(resource,rmContainer.getAllocatedResource());
assertEquals(nodeId,rmContainer.getAllocatedNode());
assertEquals(priority,rmContainer.getAllocatedPriority());
verify(writer).containerStarted(any(RMContainer.class));
// Walk through the normal lifecycle transitions.
rmContainer.handle(new RMContainerEvent(containerId,RMContainerEventType.START));
drainDispatcher.await();
assertEquals(RMContainerState.ALLOCATED,rmContainer.getState());
rmContainer.handle(new RMContainerEvent(containerId,RMContainerEventType.ACQUIRED));
drainDispatcher.await();
assertEquals(RMContainerState.ACQUIRED,rmContainer.getState());
rmContainer.handle(new RMContainerEvent(containerId,RMContainerEventType.LAUNCHED));
drainDispatcher.await();
assertEquals(RMContainerState.RUNNING,rmContainer.getState());
assertEquals("//host:3465/node/containerlogs/container_1_0001_01_000001/user",rmContainer.getLogURL());
// Reset so only the release-triggered attempt event is captured below.
reset(appAttemptEventHandler);
ContainerStatus containerStatus=SchedulerUtils.createAbnormalContainerStatus(containerId,SchedulerUtils.RELEASED_CONTAINER);
rmContainer.handle(new RMContainerFinishedEvent(containerId,containerStatus,RMContainerEventType.RELEASED));
drainDispatcher.await();
assertEquals(RMContainerState.RELEASED,rmContainer.getState());
assertEquals(SchedulerUtils.RELEASED_CONTAINER,rmContainer.getDiagnosticsInfo());
assertEquals(ContainerExitStatus.ABORTED,rmContainer.getContainerExitStatus());
assertEquals(ContainerState.COMPLETE,rmContainer.getContainerState());
verify(writer).containerFinished(any(RMContainer.class));
// The attempt must have received exactly this container-finished event.
ArgumentCaptor captor=ArgumentCaptor.forClass(RMAppAttemptContainerFinishedEvent.class);
verify(appAttemptEventHandler).handle(captor.capture());
RMAppAttemptContainerFinishedEvent cfEvent=captor.getValue();
assertEquals(appAttemptId,cfEvent.getApplicationAttemptId());
assertEquals(containerStatus,cfEvent.getContainerStatus());
assertEquals(RMAppAttemptEventType.CONTAINER_FINISHED,cfEvent.getType());
// RELEASED is terminal: a later FINISHED event must not change the state.
rmContainer.handle(new RMContainerFinishedEvent(containerId,SchedulerUtils.createAbnormalContainerStatus(containerId,"FinishedContainer"),RMContainerEventType.FINISHED));
assertEquals(RMContainerState.RELEASED,rmContainer.getState());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testExpireWhileRunning(){
// Drives an RMContainerImpl to RUNNING, then sends an EXPIRE event: a
// running container must ignore expiry (expiration only applies before
// launch), staying RUNNING with no finish recorded in the history writer.
DrainDispatcher drainDispatcher=new DrainDispatcher();
EventHandler appAttemptEventHandler=mock(EventHandler.class);
EventHandler generic=mock(EventHandler.class);
drainDispatcher.register(RMAppAttemptEventType.class,appAttemptEventHandler);
drainDispatcher.register(RMNodeEventType.class,generic);
drainDispatcher.init(new YarnConfiguration());
drainDispatcher.start();
NodeId nodeId=BuilderUtils.newNodeId("host",3425);
ApplicationId appId=BuilderUtils.newApplicationId(1,1);
ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(appId,1);
ContainerId containerId=BuilderUtils.newContainerId(appAttemptId,1);
ContainerAllocationExpirer expirer=mock(ContainerAllocationExpirer.class);
Resource resource=BuilderUtils.newResource(512,1);
Priority priority=BuilderUtils.newPriority(5);
Container container=BuilderUtils.newContainer(containerId,nodeId,"host:3465",resource,priority,null);
RMApplicationHistoryWriter writer=mock(RMApplicationHistoryWriter.class);
RMContext rmContext=mock(RMContext.class);
when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
RMContainer rmContainer=new RMContainerImpl(container,appAttemptId,nodeId,"user",rmContext);
assertEquals(RMContainerState.NEW,rmContainer.getState());
assertEquals(resource,rmContainer.getAllocatedResource());
assertEquals(nodeId,rmContainer.getAllocatedNode());
assertEquals(priority,rmContainer.getAllocatedPriority());
verify(writer).containerStarted(any(RMContainer.class));
// Walk through the normal lifecycle transitions up to RUNNING.
rmContainer.handle(new RMContainerEvent(containerId,RMContainerEventType.START));
drainDispatcher.await();
assertEquals(RMContainerState.ALLOCATED,rmContainer.getState());
rmContainer.handle(new RMContainerEvent(containerId,RMContainerEventType.ACQUIRED));
drainDispatcher.await();
assertEquals(RMContainerState.ACQUIRED,rmContainer.getState());
rmContainer.handle(new RMContainerEvent(containerId,RMContainerEventType.LAUNCHED));
drainDispatcher.await();
assertEquals(RMContainerState.RUNNING,rmContainer.getState());
assertEquals("//host:3465/node/containerlogs/container_1_0001_01_000001/user",rmContainer.getLogURL());
reset(appAttemptEventHandler);
// EXPIRE against a RUNNING container: must be a no-op.
ContainerStatus containerStatus=SchedulerUtils.createAbnormalContainerStatus(containerId,SchedulerUtils.EXPIRED_CONTAINER);
rmContainer.handle(new RMContainerFinishedEvent(containerId,containerStatus,RMContainerEventType.EXPIRE));
drainDispatcher.await();
assertEquals(RMContainerState.RUNNING,rmContainer.getState());
verify(writer,never()).containerFinished(any(RMContainer.class));
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * Verifies that an RMContainer retains its originating ResourceRequests while
 * ALLOCATED (needed to recover the ask on preemption) and drops them once the
 * AM pulls the container (ACQUIRED).
 */
@Test public void testExistenceOfResourceRequestInRMContainer() throws Exception {
  Configuration conf = new Configuration();
  MockRM rm1 = new MockRM(conf);
  rm1.start();
  try {
    MockNM nm1 = rm1.registerNode("unknownhost:1234", 8000);
    RMApp app1 = rm1.submitApp(1024);
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
    ResourceScheduler scheduler = rm1.getResourceScheduler();
    // Ask for one 1GB container; id 2 is the first non-AM container.
    am1.allocate("127.0.0.1", 1024, 1, new ArrayList());
    ContainerId containerId2 = ContainerId.newInstance(am1.getApplicationAttemptId(), 2);
    rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED);
    // While allocated, the requests that produced the container are retained.
    Assert.assertNotNull(scheduler.getRMContainer(containerId2).getResourceRequests());
    // An empty allocate() pulls the container, moving it to ACQUIRED ...
    am1.allocate(new ArrayList(), new ArrayList()).getAllocatedContainers();
    rm1.waitForState(nm1, containerId2, RMContainerState.ACQUIRED);
    // ... at which point the cached requests are released.
    Assert.assertNull(scheduler.getRMContainer(containerId2).getResourceRequests());
  } finally {
    // The original leaked the started RM; stop it so threads/ports are freed.
    rm1.stop();
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * Requesting QueueMetrics for the same leaf queue twice against one
 * MetricsSystem must not fail: the second forQueue() lookup should be served
 * from the internal cache rather than re-registering the source.
 */
@Test public void testMetricsCache(){
  MetricsSystem ms = new MetricsSystemImpl("cache");
  ms.start();
  try {
    String p1 = "root1";
    String leafQueueName = "root1.leaf";
    QueueMetrics p1Metrics = QueueMetrics.forQueue(ms, p1, null, true, conf);
    Queue parentQueue1 = make(stub(Queue.class).returning(p1Metrics).from.getMetrics());
    QueueMetrics metrics = QueueMetrics.forQueue(ms, leafQueueName, parentQueue1, true, conf);
    // Message typo fixed: "shoudn't" -> "shouldn't".
    Assert.assertNotNull("QueueMetrics for A shouldn't be null", metrics);
    // Same queue name again: must come back non-null from the cache instead
    // of colliding on metrics-source registration.
    QueueMetrics alterMetrics = QueueMetrics.forQueue(ms, leafQueueName, parentQueue1, true, conf);
    Assert.assertNotNull("QueueMetrics for alterMetrics shouldn't be null", alterMetrics);
  }
  finally {
    ms.shutdown();
  }
}
APIUtilityVerifier NullVerifier
/**
 * One application retries its attempt three times; after each attempt the
 * queue's attempt gauges must return to their post-submit values, and only
 * when the application itself finishes as FAILED does the failed-app counter
 * tick up (to 1, not 3). User metrics are disabled, so no user source exists.
 */
@Test public void testQueueAppMetricsForMultipleFailures(){
  String queueName = "single";
  String user = "alice";
  QueueMetrics metrics = QueueMetrics.forQueue(ms, queueName, null, false, new Configuration());
  MetricsSource queueSource = queueSource(ms, queueName);
  AppSchedulingInfo app = mockApp(user);
  metrics.submitApp(user);
  MetricsSource userSource = userSource(ms, queueName, user);
  checkApps(queueSource, 1, 0, 0, 0, 0, 0, true);
  // Three identical attempt cycles (the original spelled each one out):
  // submitAppAttempt -> runAppAttempt -> finishAppAttempt, with the gauges
  // checked after every transition.
  for (int attempt = 0; attempt < 3; attempt++) {
    metrics.submitAppAttempt(user);
    checkApps(queueSource, 1, 1, 0, 0, 0, 0, true);
    metrics.runAppAttempt(app.getApplicationId(), user);
    checkApps(queueSource, 1, 0, 1, 0, 0, 0, true);
    metrics.finishAppAttempt(app.getApplicationId(), app.isPending(), app.getUser());
    checkApps(queueSource, 1, 0, 0, 0, 0, 0, true);
  }
  // The app-level failure is recorded exactly once.
  metrics.finishApp(user, RMAppState.FAILED);
  checkApps(queueSource, 1, 0, 0, 0, 1, 0, true);
  // enableUserMetrics was false, so no per-user source was registered.
  assertNull(userSource);
}
APIUtilityVerifier NullVerifier
/**
 * Walks a single application through submit -> attempt -> run -> finish on
 * one queue (user metrics disabled) and verifies the queue-level application
 * and resource gauges after each transition.
 */
@Test public void testDefaultSingleQueueMetrics(){
String queueName="single";
String user="alice";
// enableUserMetrics=false: a per-user source should never be registered.
QueueMetrics metrics=QueueMetrics.forQueue(ms,queueName,null,false,conf);
MetricsSource queueSource=queueSource(ms,queueName);
AppSchedulingInfo app=mockApp(user);
metrics.submitApp(user);
MetricsSource userSource=userSource(ms,queueName,user);
checkApps(queueSource,1,0,0,0,0,0,true);
metrics.submitAppAttempt(user);
checkApps(queueSource,1,1,0,0,0,0,true);
// 5 pending asks of 3GB/3 vcores => 15GB/15 vcores pending (checked below).
metrics.setAvailableResourcesToQueue(Resources.createResource(100 * GB,100));
metrics.incrPendingResources(user,5,Resources.createResource(3 * GB,3));
checkResources(queueSource,0,0,0,0,0,100 * GB,100,15 * GB,15,5,0,0,0);
metrics.runAppAttempt(app.getApplicationId(),user);
checkApps(queueSource,1,0,1,0,0,0,true);
// Allocating 3 containers of 2GB/2 moves 6GB/6 from pending to allocated.
metrics.allocateResources(user,3,Resources.createResource(2 * GB,2),true);
checkResources(queueSource,6 * GB,6,3,3,0,100 * GB,100,9 * GB,9,2,0,0,0);
// Releasing one 2GB/2 container; pending stays at 9GB/9.
metrics.releaseResources(user,1,Resources.createResource(2 * GB,2));
checkResources(queueSource,4 * GB,4,2,3,1,100 * GB,100,9 * GB,9,2,0,0,0);
metrics.finishAppAttempt(app.getApplicationId(),app.isPending(),app.getUser());
checkApps(queueSource,1,0,0,0,0,0,true);
metrics.finishApp(user,RMAppState.FINISHED);
checkApps(queueSource,1,0,0,1,0,0,true);
// User metrics were disabled at creation, so no user source exists.
assertNull(userSource);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Moves a scheduler attempt holding one live and one reserved container from
 * queue "old" to sibling queue "new" and verifies all gauges (running apps,
 * allocated and reserved resources) are transferred wholesale while the
 * shared parent's totals remain constant.
 */
@Test public void testMove(){
final String user="user1";
Queue parentQueue=createQueue("parent",null);
Queue oldQueue=createQueue("old",parentQueue);
Queue newQueue=createQueue("new",parentQueue);
QueueMetrics parentMetrics=parentQueue.getMetrics();
QueueMetrics oldMetrics=oldQueue.getMetrics();
QueueMetrics newMetrics=newQueue.getMetrics();
ApplicationAttemptId appAttId=createAppAttemptId(0,0);
RMContext rmContext=mock(RMContext.class);
// Epoch 3 is mocked so new container ids carry it (0x00c00001 below) --
// NOTE(review): the exact bit layout comes from getNewContainerId(),
// which is outside this view.
when(rmContext.getEpoch()).thenReturn(3);
SchedulerApplicationAttempt app=new SchedulerApplicationAttempt(appAttId,user,oldQueue,oldQueue.getActiveUsersManager(),rmContext);
oldMetrics.submitApp(user);
assertEquals(app.getNewContainerId(),0x00c00001);
// One live container consuming 1536MB / 2 vcores at priority 2.
Resource requestedResource=Resource.newInstance(1536,2);
Priority requestedPriority=Priority.newInstance(2);
ResourceRequest request=ResourceRequest.newInstance(requestedPriority,ResourceRequest.ANY,requestedResource,3);
app.updateResourceRequests(Arrays.asList(request));
RMContainer container1=createRMContainer(appAttId,1,requestedResource);
app.liveContainers.put(container1.getContainerId(),container1);
SchedulerNode node=createNode();
app.appSchedulingInfo.allocate(NodeType.OFF_SWITCH,node,requestedPriority,request,container1.getContainer());
// Plus one reserved container of 2048MB / 3 vcores at priority 1, wired
// directly into the attempt's internal reservation map.
Priority prio1=Priority.newInstance(1);
Resource reservedResource=Resource.newInstance(2048,3);
RMContainer container2=createReservedRMContainer(appAttId,1,reservedResource,node.getNodeID(),prio1);
Map reservations=new HashMap();
reservations.put(node.getNodeID(),container2);
app.reservedContainers.put(prio1,reservations);
oldMetrics.reserveResource(user,reservedResource);
// Before the move: everything is charged to "old" (and the parent).
checkQueueMetrics(oldMetrics,1,1,1536,2,2048,3,3072,4);
checkQueueMetrics(newMetrics,0,0,0,0,0,0,0,0);
checkQueueMetrics(parentMetrics,1,1,1536,2,2048,3,3072,4);
app.move(newQueue);
// After the move: charges shift to "new"; the parent totals are unchanged.
checkQueueMetrics(oldMetrics,0,0,0,0,0,0,0,0);
checkQueueMetrics(newMetrics,1,1,1536,2,2048,3,3072,4);
checkQueueMetrics(parentMetrics,1,1,1536,2,2048,3,3072,4);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * SchedulerUtils.normalizeRequest must clamp an ask into [min,max] memory and
 * round it up to the next multiple of the minimum allocation. The original
 * repeated the same set/normalize/assert triple eight times; factored into
 * {@link #assertNormalized}.
 */
@Test(timeout=30000) public void testNormalizeRequest(){
  ResourceCalculator resourceCalculator = new DefaultResourceCalculator();
  final int minMemory = 1024;
  final int maxMemory = 8192;
  Resource minResource = Resources.createResource(minMemory, 0);
  Resource maxResource = Resources.createResource(maxMemory, 0);
  ResourceRequest ask = new ResourceRequestPBImpl();
  // Negative and zero asks are raised to the minimum allocation.
  assertNormalized(ask, resourceCalculator, minResource, maxResource, -1024, minMemory);
  assertNormalized(ask, resourceCalculator, minResource, maxResource, 0, minMemory);
  // Exact multiples of the minimum pass through unchanged.
  assertNormalized(ask, resourceCalculator, minResource, maxResource, 2 * minMemory, 2 * minMemory);
  // Anything in between rounds UP to the next multiple.
  assertNormalized(ask, resourceCalculator, minResource, maxResource, minMemory + 10, 2 * minMemory);
  assertNormalized(ask, resourceCalculator, minResource, maxResource, maxMemory, maxMemory);
  assertNormalized(ask, resourceCalculator, minResource, maxResource, maxMemory - 10, maxMemory);
  // When the cap is not itself a multiple, rounding up is clamped to the cap.
  maxResource = Resources.createResource(maxMemory - 10, 0);
  assertNormalized(ask, resourceCalculator, minResource, maxResource, maxMemory - 100, maxResource.getMemory());
  // Asks above the cap are clamped down to it.
  maxResource = Resources.createResource(maxMemory, 0);
  assertNormalized(ask, resourceCalculator, minResource, maxResource, maxMemory + 100, maxResource.getMemory());
}

/**
 * Sets {@code requestedMemory} on {@code ask}, normalizes it (cluster
 * resource deliberately null, as in the original test), and asserts the
 * resulting memory.
 */
private static void assertNormalized(ResourceRequest ask, ResourceCalculator calculator, Resource min, Resource max, int requestedMemory, int expectedMemory) {
  ask.setCapability(Resources.createResource(requestedMemory));
  SchedulerUtils.normalizeRequest(ask, calculator, null, min, max);
  assertEquals(expectedMemory, ask.getCapability().getMemory());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Exercises LeafQueue limit derivation from CapacityScheduler configuration:
 * max active applications (queue-wide and per user), max applications and
 * max applications per user, then verifies the limits are recomputed after a
 * cluster-resource update and that per-queue maximum-am-resource-percent and
 * maximum-applications overrides take effect after re-parsing the queues.
 */
@Test public void testLimitsComputation() throws Exception {
CapacitySchedulerConfiguration csConf=new CapacitySchedulerConfiguration();
setupQueueConfiguration(csConf);
YarnConfiguration conf=new YarnConfiguration();
// Scheduler context is fully mocked: 1GB min / 16GB max container,
// 100-node cluster of 16GB each.
CapacitySchedulerContext csContext=mock(CapacitySchedulerContext.class);
when(csContext.getConfiguration()).thenReturn(csConf);
when(csContext.getConf()).thenReturn(conf);
when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB,1));
when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16 * GB,16));
when(csContext.getApplicationComparator()).thenReturn(CapacityScheduler.applicationComparator);
when(csContext.getQueueComparator()).thenReturn(CapacityScheduler.queueComparator);
when(csContext.getResourceCalculator()).thenReturn(resourceCalculator);
Resource clusterResource=Resources.createResource(100 * 16 * GB,100 * 16);
when(csContext.getClusterResource()).thenReturn(clusterResource);
Map queues=new HashMap();
CSQueue root=CapacityScheduler.parseQueue(csContext,csConf,null,"root",queues,queues,TestUtils.spyHook);
LeafQueue queue=(LeafQueue)queues.get(A);
LOG.info("Queue 'A' -" + " maxActiveApplications=" + queue.getMaximumActiveApplications() + " maxActiveApplicationsPerUser="+ queue.getMaximumActiveApplicationsPerUser());
// Expected formula: ceil(clusterMB/minAlloc * amResourcePercent * absMaxCap),
// floored at 1.
int expectedMaxActiveApps=Math.max(1,(int)Math.ceil(((float)clusterResource.getMemory() / (1 * GB)) * csConf.getMaximumApplicationMasterResourcePerQueuePercent(queue.getQueuePath()) * queue.getAbsoluteMaximumCapacity()));
assertEquals(expectedMaxActiveApps,queue.getMaximumActiveApplications());
int expectedMaxActiveAppsUsingAbsCap=Math.max(1,(int)Math.ceil(((float)clusterResource.getMemory() / (1 * GB)) * csConf.getMaximumApplicationMasterResourcePercent() * queue.getAbsoluteCapacity()));
assertEquals((int)Math.ceil(expectedMaxActiveAppsUsingAbsCap * (queue.getUserLimit() / 100.0f) * queue.getUserLimitFactor()),queue.getMaximumActiveApplicationsPerUser());
assertEquals((int)(clusterResource.getMemory() * queue.getAbsoluteCapacity()),queue.getMetrics().getAvailableMB());
// Grow the cluster to 120 nodes; all derived limits must be recomputed.
clusterResource=Resources.createResource(120 * 16 * GB);
root.updateClusterResource(clusterResource);
expectedMaxActiveApps=Math.max(1,(int)Math.ceil(((float)clusterResource.getMemory() / (1 * GB)) * csConf.getMaximumApplicationMasterResourcePerQueuePercent(queue.getQueuePath()) * queue.getAbsoluteMaximumCapacity()));
assertEquals(expectedMaxActiveApps,queue.getMaximumActiveApplications());
expectedMaxActiveAppsUsingAbsCap=Math.max(1,(int)Math.ceil(((float)clusterResource.getMemory() / (1 * GB)) * csConf.getMaximumApplicationMasterResourcePercent() * queue.getAbsoluteCapacity()));
assertEquals((int)Math.ceil(expectedMaxActiveAppsUsingAbsCap * (queue.getUserLimit() / 100.0f) * queue.getUserLimitFactor()),queue.getMaximumActiveApplicationsPerUser());
assertEquals((int)(clusterResource.getMemory() * queue.getAbsoluteCapacity()),queue.getMetrics().getAvailableMB());
// No per-queue maximum-applications configured yet: the default system-wide
// maximum scaled by absolute capacity applies.
assertEquals((int)CapacitySchedulerConfiguration.UNDEFINED,csConf.getMaximumApplicationsPerQueue(queue.getQueuePath()));
int expectedMaxApps=(int)(CapacitySchedulerConfiguration.DEFAULT_MAXIMUM_SYSTEM_APPLICATIIONS * queue.getAbsoluteCapacity());
assertEquals(expectedMaxApps,queue.getMaxApplications());
int expectedMaxAppsPerUser=(int)(expectedMaxApps * (queue.getUserLimit() / 100.0f) * queue.getUserLimitFactor());
assertEquals(expectedMaxAppsPerUser,queue.getMaxApplicationsPerUser());
assertEquals((long)CapacitySchedulerConfiguration.DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT,(long)csConf.getMaximumApplicationMasterResourcePerQueuePercent(queue.getQueuePath()));
// Override the per-queue AM resource percent to 0.5 and re-parse the
// hierarchy: the active-app limit must reflect the new percent.
csConf.setFloat("yarn.scheduler.capacity." + queue.getQueuePath() + ".maximum-am-resource-percent",0.5f);
queues=new HashMap();
root=CapacityScheduler.parseQueue(csContext,csConf,null,"root",queues,queues,TestUtils.spyHook);
clusterResource=Resources.createResource(100 * 16 * GB);
queue=(LeafQueue)queues.get(A);
expectedMaxActiveApps=Math.max(1,(int)Math.ceil(((float)clusterResource.getMemory() / (1 * GB)) * csConf.getMaximumApplicationMasterResourcePerQueuePercent(queue.getQueuePath()) * queue.getAbsoluteMaximumCapacity()));
assertEquals((long)0.5,(long)csConf.getMaximumApplicationMasterResourcePerQueuePercent(queue.getQueuePath()));
assertEquals(expectedMaxActiveApps,queue.getMaximumActiveApplications());
// Explicit per-queue maximum-applications overrides the derived default.
csConf.setInt("yarn.scheduler.capacity." + queue.getQueuePath() + ".maximum-applications",9999);
queues=new HashMap();
root=CapacityScheduler.parseQueue(csContext,csConf,null,"root",queues,queues,TestUtils.spyHook);
queue=(LeafQueue)queues.get(A);
assertEquals(9999,(int)csConf.getMaximumApplicationsPerQueue(queue.getQueuePath()));
assertEquals(9999,queue.getMaxApplications());
expectedMaxAppsPerUser=(int)(9999 * (queue.getUserLimit() / 100.0f) * queue.getUserLimitFactor());
assertEquals(expectedMaxAppsPerUser,queue.getMaxApplicationsPerUser());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * With maximumActiveApplications stubbed to 2, submits four attempts for one
 * user and verifies activation bookkeeping as attempts finish: finishing a
 * still-PENDING attempt (app_2) must not activate anything, while finishing
 * an ACTIVE attempt (app_0) promotes the next pending attempt (app_3).
 */
@Test public void testActiveLimitsWithKilledApps() throws Exception {
final String user_0="user_0";
int APPLICATION_ID=0;
// Cap the queue at 2 concurrently active applications.
doReturn(2).when(queue).getMaximumActiveApplications();
// app_0 and app_1 fit under the cap and activate immediately.
FiCaSchedulerApp app_0=getMockApplication(APPLICATION_ID++,user_0);
queue.submitApplicationAttempt(app_0,user_0);
assertEquals(1,queue.getNumActiveApplications());
assertEquals(0,queue.getNumPendingApplications());
assertEquals(1,queue.getNumActiveApplications(user_0));
assertEquals(0,queue.getNumPendingApplications(user_0));
assertTrue(queue.activeApplications.contains(app_0));
FiCaSchedulerApp app_1=getMockApplication(APPLICATION_ID++,user_0);
queue.submitApplicationAttempt(app_1,user_0);
assertEquals(2,queue.getNumActiveApplications());
assertEquals(0,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(0,queue.getNumPendingApplications(user_0));
assertTrue(queue.activeApplications.contains(app_1));
// app_2 and app_3 exceed the cap and queue up as pending.
FiCaSchedulerApp app_2=getMockApplication(APPLICATION_ID++,user_0);
queue.submitApplicationAttempt(app_2,user_0);
assertEquals(2,queue.getNumActiveApplications());
assertEquals(1,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(1,queue.getNumPendingApplications(user_0));
assertTrue(queue.pendingApplications.contains(app_2));
FiCaSchedulerApp app_3=getMockApplication(APPLICATION_ID++,user_0);
queue.submitApplicationAttempt(app_3,user_0);
assertEquals(2,queue.getNumActiveApplications());
assertEquals(2,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(2,queue.getNumPendingApplications(user_0));
assertTrue(queue.pendingApplications.contains(app_3));
// Killing a PENDING attempt removes it but must not change active count.
queue.finishApplicationAttempt(app_2,A);
assertEquals(2,queue.getNumActiveApplications());
assertEquals(1,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(1,queue.getNumPendingApplications(user_0));
assertFalse(queue.pendingApplications.contains(app_2));
assertFalse(queue.activeApplications.contains(app_2));
// Finishing an ACTIVE attempt frees a slot: app_3 is promoted from pending.
queue.finishApplicationAttempt(app_0,A);
assertEquals(2,queue.getNumActiveApplications());
assertEquals(0,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(0,queue.getNumPendingApplications(user_0));
assertTrue(queue.activeApplications.contains(app_3));
assertFalse(queue.pendingApplications.contains(app_3));
assertFalse(queue.activeApplications.contains(app_0));
// Drain the remaining attempts back to an empty queue.
queue.finishApplicationAttempt(app_1,A);
assertEquals(1,queue.getNumActiveApplications());
assertEquals(0,queue.getNumPendingApplications());
assertEquals(1,queue.getNumActiveApplications(user_0));
assertEquals(0,queue.getNumPendingApplications(user_0));
assertFalse(queue.activeApplications.contains(app_1));
queue.finishApplicationAttempt(app_3,A);
assertEquals(0,queue.getNumActiveApplications());
assertEquals(0,queue.getNumPendingApplications());
assertEquals(0,queue.getNumActiveApplications(user_0));
assertEquals(0,queue.getNumPendingApplications(user_0));
assertFalse(queue.activeApplications.contains(app_3));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Checks CSQueueUtils.getAbsoluteMaxAvailCapacity for leaf L1Q1.L2Q2 as
 * usage accumulates elsewhere in the hierarchy. Topology (capacity/max-cap):
 * root -> L1Q1 (80/80), L1Q2 (20/100); L1Q1 -> L2Q1 (50/50), L2Q2 (50/50).
 * L2Q2's headroom starts at 0.8 * 0.5 = 0.4 of the cluster and shrinks only
 * when usage by other queues squeezes its ancestors' remaining room.
 */
@Test public void testAbsoluteMaxAvailCapacityWithUse() throws Exception {
ResourceCalculator resourceCalculator=new DefaultResourceCalculator();
Resource clusterResource=Resources.createResource(100 * 16 * GB,100 * 32);
YarnConfiguration conf=new YarnConfiguration();
CapacitySchedulerConfiguration csConf=new CapacitySchedulerConfiguration();
CapacitySchedulerContext csContext=mock(CapacitySchedulerContext.class);
when(csContext.getConf()).thenReturn(conf);
when(csContext.getConfiguration()).thenReturn(csConf);
when(csContext.getClusterResource()).thenReturn(clusterResource);
when(csContext.getResourceCalculator()).thenReturn(resourceCalculator);
when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB,1));
when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16 * GB,32));
final String L1Q1="L1Q1";
final String L1Q2="L1Q2";
final String L2Q1="L2Q1";
final String L2Q2="L2Q2";
csConf.setQueues(CapacitySchedulerConfiguration.ROOT,new String[]{L1Q1,L1Q2,L2Q1,L2Q2});
final String L1Q1P=CapacitySchedulerConfiguration.ROOT + "." + L1Q1;
csConf.setCapacity(L1Q1P,80);
csConf.setMaximumCapacity(L1Q1P,80);
final String L1Q2P=CapacitySchedulerConfiguration.ROOT + "." + L1Q2;
csConf.setCapacity(L1Q2P,20);
csConf.setMaximumCapacity(L1Q2P,100);
final String L2Q1P=L1Q1P + "." + L2Q1;
csConf.setCapacity(L2Q1P,50);
csConf.setMaximumCapacity(L2Q1P,50);
final String L2Q2P=L1Q1P + "." + L2Q2;
csConf.setCapacity(L2Q2P,50);
csConf.setMaximumCapacity(L2Q2P,50);
float result;
ParentQueue root=new ParentQueue(csContext,CapacitySchedulerConfiguration.ROOT,null,null);
LeafQueue l1q1=new LeafQueue(csContext,L1Q1,root,null);
LeafQueue l1q2=new LeafQueue(csContext,L1Q2,root,null);
LeafQueue l2q2=new LeafQueue(csContext,L2Q2,l1q1,null);
LeafQueue l2q1=new LeafQueue(csContext,L2Q1,l1q1,null);
// No usage anywhere: full 0.4 available to l2q2.
result=CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,l2q2);
assertEquals(0.4f,result,0.000001f);
LOG.info("t2 l2q2 " + result);
// 10% of the cluster used by l1q2: still within l1q2's own share, so l2q2's
// headroom is untouched.
Resources.addTo(root.getUsedResources(),Resources.multiply(clusterResource,0.1f));
Resources.addTo(l1q2.getUsedResources(),Resources.multiply(clusterResource,0.1f));
result=CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,l2q2);
assertEquals(0.4f,result,0.000001f);
LOG.info("t2 l2q2 " + result);
// l1q2 now uses 40% total (over its 20% capacity): the excess eats into
// what l1q1's subtree can expand into, dropping l2q2 to 0.3.
Resources.addTo(root.getUsedResources(),Resources.multiply(clusterResource,0.3f));
Resources.addTo(l1q2.getUsedResources(),Resources.multiply(clusterResource,0.3f));
result=CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,l2q2);
assertEquals(0.3f,result,0.000001f);
LOG.info("t2 l2q2 " + result);
// Usage charged to l1q1 itself (not a sibling of l2q2's parent) does not
// reduce l2q2's headroom yet.
Resources.addTo(root.getUsedResources(),Resources.multiply(clusterResource,0.1f));
Resources.addTo(l1q1.getUsedResources(),Resources.multiply(clusterResource,0.1f));
result=CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,l2q2);
assertEquals(0.3f,result,0.000001f);
LOG.info("t2 l2q2 " + result);
Resources.addTo(root.getUsedResources(),Resources.multiply(clusterResource,0.2f));
Resources.addTo(l1q1.getUsedResources(),Resources.multiply(clusterResource,0.2f));
Resources.addTo(l2q1.getUsedResources(),Resources.multiply(clusterResource,0.2f));
result=CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,l2q2);
assertEquals(0.3f,result,0.000001f);
LOG.info("t2 l2q2 " + result);
// Additional sibling (l2q1) usage finally squeezes l2q2 down to 0.1.
Resources.addTo(root.getUsedResources(),Resources.multiply(clusterResource,0.2f));
Resources.addTo(l1q1.getUsedResources(),Resources.multiply(clusterResource,0.2f));
Resources.addTo(l2q1.getUsedResources(),Resources.multiply(clusterResource,0.2f));
result=CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,l2q2);
assertEquals(0.1f,result,0.000001f);
LOG.info("t2 l2q2 " + result);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Reinitializes a running CapacityScheduler with a brand-new queue b4 carved
 * out of b3's capacity and verifies the new queue is wired under parent B.
 * The shared B3_CAPACITY field is temporarily reduced and restored in the
 * finally block so other tests see the original value even on failure.
 */
@Test public void testRefreshQueuesWithNewQueue() throws Exception {
CapacityScheduler cs=new CapacityScheduler();
CapacitySchedulerConfiguration conf=new CapacitySchedulerConfiguration();
setupQueueConfiguration(conf);
cs.setConf(new YarnConfiguration());
cs.setRMContext(resourceManager.getRMContext());
cs.init(conf);
cs.start();
// First reinitialize against a fresh RMContext with the original config to
// establish the baseline capacities.
cs.reinitialize(conf,new RMContextImpl(null,null,null,null,null,null,new RMContainerTokenSecretManager(conf),new NMTokenSecretManagerInRM(conf),new ClientToAMTokenSecretManagerInRM(),null));
checkQueueCapacities(cs,A_CAPACITY,B_CAPACITY);
// New queue b4 takes 10% out of b3 so B's children still sum to 100.
String B4=B + ".b4";
float B4_CAPACITY=10;
B3_CAPACITY-=B4_CAPACITY;
try {
conf.setCapacity(A,80f);
conf.setCapacity(B,20f);
conf.setQueues(B,new String[]{"b1","b2","b3","b4"});
conf.setCapacity(B1,B1_CAPACITY);
conf.setCapacity(B2,B2_CAPACITY);
conf.setCapacity(B3,B3_CAPACITY);
conf.setCapacity(B4,B4_CAPACITY);
cs.reinitialize(conf,mockContext);
checkQueueCapacities(cs,80f,20f);
// The refreshed hierarchy must contain b4 directly under B.
CSQueue rootQueue=cs.getRootQueue();
CSQueue queueB=findQueue(rootQueue,B);
CSQueue queueB4=findQueue(queueB,B4);
assertEquals(queueB,queueB4.getParent());
}
finally {
// Undo the static mutation and release the scheduler regardless of outcome.
B3_CAPACITY+=B4_CAPACITY;
cs.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Moves an application between sibling leaf queues (a1 -> a2) under the same
 * parent "a" and verifies queue membership before and after: the app leaves
 * a1, appears in a2, and its membership in ancestors "a" and "root" is
 * preserved throughout.
 */
@Test public void testMoveAppSameParent() throws Exception {
  MockRM rm = setUpMove();
  AbstractYarnScheduler scheduler = (AbstractYarnScheduler) rm.getResourceScheduler();
  RMApp app = rm.submitApp(GB, "test-move-1", "user_0", null, "a1");
  ApplicationAttemptId appAttemptId = rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
  List appsInA1 = scheduler.getAppsInQueue("a1");
  assertEquals(1, appsInA1.size());
  String queue = scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName();
  // assertEquals instead of assertTrue(queue.equals(...)): on failure it
  // reports expected vs. actual instead of a bare AssertionError.
  Assert.assertEquals("a1", queue);
  List appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.contains(appAttemptId));
  assertEquals(1, appsInA.size());
  List appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(appAttemptId));
  assertEquals(1, appsInRoot.size());
  List appsInA2 = scheduler.getAppsInQueue("a2");
  assertTrue(appsInA2.isEmpty());
  scheduler.moveApplication(app.getApplicationId(), "a2");
  // Post-move: the app shows up only in a2 among the leaves ...
  appsInA2 = scheduler.getAppsInQueue("a2");
  assertEquals(1, appsInA2.size());
  queue = scheduler.getApplicationAttempt(appsInA2.get(0)).getQueue().getQueueName();
  Assert.assertEquals("a2", queue);
  appsInA1 = scheduler.getAppsInQueue("a1");
  assertTrue(appsInA1.isEmpty());
  // ... while the common ancestors still report exactly one app.
  appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.contains(appAttemptId));
  assertEquals(1, appsInA.size());
  appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(appAttemptId));
  assertEquals(1, appsInRoot.size());
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * moveAllApps("a1","b1") must relocate every application from a1 to b1,
 * updating leaf and ancestor membership (a -> b) accordingly.
 */
@Test public void testMoveAllApps() throws Exception {
  MockRM rm = setUpMove();
  AbstractYarnScheduler scheduler = (AbstractYarnScheduler) rm.getResourceScheduler();
  RMApp app = rm.submitApp(GB, "test-move-1", "user_0", null, "a1");
  ApplicationAttemptId appAttemptId = rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
  List appsInA1 = scheduler.getAppsInQueue("a1");
  assertEquals(1, appsInA1.size());
  List appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.contains(appAttemptId));
  assertEquals(1, appsInA.size());
  String queue = scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName();
  // assertEquals instead of assertTrue(queue.equals(...)) for a useful
  // expected-vs-actual failure message.
  Assert.assertEquals("a1", queue);
  List appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(appAttemptId));
  assertEquals(1, appsInRoot.size());
  List appsInB1 = scheduler.getAppsInQueue("b1");
  assertTrue(appsInB1.isEmpty());
  List appsInB = scheduler.getAppsInQueue("b");
  assertTrue(appsInB.isEmpty());
  scheduler.moveAllApps("a1", "b1");
  // moveAllApps is processed asynchronously via dispatcher events; the sleep
  // gives them time to run. NOTE(review): a state-polling wait would be less
  // flaky than a fixed sleep — kept as-is to preserve behavior.
  Thread.sleep(1000);
  appsInB1 = scheduler.getAppsInQueue("b1");
  assertEquals(1, appsInB1.size());
  queue = scheduler.getApplicationAttempt(appsInB1.get(0)).getQueue().getQueueName();
  Assert.assertEquals("b1", queue);
  appsInB = scheduler.getAppsInQueue("b");
  assertTrue(appsInB.contains(appAttemptId));
  assertEquals(1, appsInB.size());
  appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(appAttemptId));
  assertEquals(1, appsInRoot.size());
  // Source leaf and its parent must now be empty.
  appsInA1 = scheduler.getAppsInQueue("a1");
  assertTrue(appsInA1.isEmpty());
  appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.isEmpty());
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * killAllAppsInQueue("a1") must drive the app to KILLED and remove it from
 * the leaf queue and every ancestor.
 */
@Test public void testKillAllAppsInQueue() throws Exception {
  MockRM rm = setUpMove();
  AbstractYarnScheduler scheduler = (AbstractYarnScheduler) rm.getResourceScheduler();
  RMApp app = rm.submitApp(GB, "test-move-1", "user_0", null, "a1");
  ApplicationAttemptId appAttemptId = rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
  List appsInA1 = scheduler.getAppsInQueue("a1");
  assertEquals(1, appsInA1.size());
  List appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.contains(appAttemptId));
  assertEquals(1, appsInA.size());
  String queue = scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName();
  // assertEquals instead of assertTrue(queue.equals(...)) for a useful
  // expected-vs-actual failure message.
  Assert.assertEquals("a1", queue);
  List appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(appAttemptId));
  assertEquals(1, appsInRoot.size());
  scheduler.killAllAppsInQueue("a1");
  // Wait for the asynchronous kill to complete before checking membership.
  rm.waitForState(app.getApplicationId(), RMAppState.KILLED);
  appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.isEmpty());
  appsInA1 = scheduler.getAppsInQueue("a1");
  assertTrue(appsInA1.isEmpty());
  appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.isEmpty());
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Moves an application across parents (a1 -> b1) and verifies membership in
 * both leaf queues and both parent queues, with "root" unchanged.
 */
@Test public void testMoveAppBasic() throws Exception {
  MockRM rm = setUpMove();
  AbstractYarnScheduler scheduler = (AbstractYarnScheduler) rm.getResourceScheduler();
  RMApp app = rm.submitApp(GB, "test-move-1", "user_0", null, "a1");
  ApplicationAttemptId appAttemptId = rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
  List appsInA1 = scheduler.getAppsInQueue("a1");
  assertEquals(1, appsInA1.size());
  String queue = scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName();
  // assertEquals instead of assertTrue(queue.equals(...)) for a useful
  // expected-vs-actual failure message.
  Assert.assertEquals("a1", queue);
  List appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.contains(appAttemptId));
  assertEquals(1, appsInA.size());
  List appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(appAttemptId));
  assertEquals(1, appsInRoot.size());
  List appsInB1 = scheduler.getAppsInQueue("b1");
  assertTrue(appsInB1.isEmpty());
  List appsInB = scheduler.getAppsInQueue("b");
  assertTrue(appsInB.isEmpty());
  scheduler.moveApplication(app.getApplicationId(), "b1");
  // Post-move: app now lives in b1 / b ...
  appsInB1 = scheduler.getAppsInQueue("b1");
  assertEquals(1, appsInB1.size());
  queue = scheduler.getApplicationAttempt(appsInB1.get(0)).getQueue().getQueueName();
  Assert.assertEquals("b1", queue);
  appsInB = scheduler.getAppsInQueue("b");
  assertTrue(appsInB.contains(appAttemptId));
  assertEquals(1, appsInB.size());
  appsInRoot = scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(appAttemptId));
  assertEquals(1, appsInRoot.size());
  // ... and is gone from a1 / a.
  appsInA1 = scheduler.getAppsInQueue("a1");
  assertTrue(appsInA1.isEmpty());
  appsInA = scheduler.getAppsInQueue("a");
  assertTrue(appsInA.isEmpty());
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Sanity check that an application added to the CapacityScheduler lands in
 * queue "a1" (and can be removed again) via the shared add/remove helper.
 */
@Test public void testAddAndRemoveAppFromCapacityScheduler() throws Exception {
  // Build a capacity-scheduler configuration and pin the scheduler class.
  CapacitySchedulerConfiguration schedulerConf = new CapacitySchedulerConfiguration();
  setupQueueConfiguration(schedulerConf);
  schedulerConf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
  MockRM resourceManager = new MockRM(schedulerConf);
  @SuppressWarnings("unchecked") AbstractYarnScheduler yarnScheduler = (AbstractYarnScheduler) resourceManager.getResourceScheduler();
  // Shared helper submits the app, checks it is tracked, and removes it.
  SchedulerApplication addedApp = TestSchedulerUtils.verifyAppAddedAndRemovedFromScheduler(yarnScheduler.getSchedulerApplications(), yarnScheduler, "a1");
  Assert.assertEquals("a1", addedApp.getQueue().getQueueName());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * When a container is killed (preempted), the scheduler must re-create the
 * ResourceRequests that originally produced it so the application gets an
 * equivalent container again.
 */
@Test(timeout=30000) public void testRecoverRequestAfterPreemption() throws Exception {
  Configuration conf = new Configuration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
  MockRM rm1 = new MockRM(conf);
  rm1.start();
  try {
    MockNM nm1 = rm1.registerNode("127.0.0.1:1234", 8000);
    RMApp app1 = rm1.submitApp(1024);
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
    am1.allocate("127.0.0.1", 1024, 1, new ArrayList());
    ContainerId containerId1 = ContainerId.newInstance(am1.getApplicationAttemptId(), 2);
    rm1.waitForState(nm1, containerId1, RMContainerState.ALLOCATED);
    RMContainer rmContainer = cs.getRMContainer(containerId1);
    List requests = rmContainer.getResourceRequests();
    FiCaSchedulerApp app = cs.getApplicationAttempt(am1.getApplicationAttemptId());
    FiCaSchedulerNode node = cs.getNode(rmContainer.getAllocatedNode());
    // After allocation the node-local ask is consumed: only rack/ANY entries
    // may remain outstanding on the attempt.
    for (ResourceRequest request : requests) {
      if (request.getResourceName().equals(node.getRackName()) || request.getResourceName().equals(ResourceRequest.ANY)) {
        continue;
      }
      Assert.assertNull(app.getResourceRequest(request.getPriority(), request.getResourceName()));
    }
    // Killing the container must restore all three asks (node, rack, ANY)
    // with one container each.
    cs.killContainer(rmContainer);
    // assertEquals instead of assertTrue(size() == 3) for a useful message.
    Assert.assertEquals(3, requests.size());
    for (ResourceRequest request : requests) {
      Assert.assertEquals(1, app.getResourceRequest(request.getPriority(), request.getResourceName()).getNumContainers());
    }
    // The recovered ask is satisfied again with a fresh container id.
    ContainerId containerId2 = ContainerId.newInstance(am1.getApplicationAttemptId(), 3);
    rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED);
    List containers = am1.allocate(new ArrayList(), new ArrayList()).getAllocatedContainers();
    // assertEquals instead of assertTrue(containers.size() == 1).
    Assert.assertEquals(1, containers.size());
  } finally {
    // The original leaked the started RM; always stop it.
    rm1.stop();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Blacklist bookkeeping on the attempt: a host passed in allocate()'s
 * blacklist-additions list becomes blacklisted, and the same host passed in
 * the removals list clears the flag again.
 */
@SuppressWarnings("resource") @Test public void testBlackListNodes() throws Exception {
  Configuration configuration = new Configuration();
  configuration.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
  MockRM mockRM = new MockRM(configuration);
  mockRM.start();
  CapacityScheduler capacityScheduler = (CapacityScheduler) mockRM.getResourceScheduler();
  // Register a single 4GB node that we will blacklist and un-blacklist.
  String nodeHost = "127.0.0.1";
  RMNode clusterNode = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1, nodeHost);
  capacityScheduler.handle(new NodeAddedSchedulerEvent(clusterNode));
  // Create an app and an attempt directly through scheduler events.
  ApplicationId applicationId = BuilderUtils.newApplicationId(100, 1);
  ApplicationAttemptId attemptId = BuilderUtils.newApplicationAttemptId(applicationId, 1);
  capacityScheduler.handle(new AppAddedSchedulerEvent(applicationId, "default", "user"));
  capacityScheduler.handle(new AppAttemptAddedSchedulerEvent(attemptId, false));
  // Add the host to the blacklist ...
  capacityScheduler.allocate(attemptId, Collections.emptyList(), Collections.emptyList(), Collections.singletonList(nodeHost), null);
  Assert.assertTrue(capacityScheduler.getApplicationAttempt(attemptId).isBlacklisted(nodeHost));
  // ... then remove it again.
  capacityScheduler.allocate(attemptId, Collections.emptyList(), Collections.emptyList(), null, Collections.singletonList(nodeHost));
  Assert.assertFalse(capacityScheduler.getApplicationAttempt(attemptId).isBlacklisted(nodeHost));
  mockRM.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that when token service uses IP addresses, a container hosted on an
 * unresolvable NM hostname is withheld from the AM, and is handed out once
 * hostname-based token service is restored.
 */
@Test public void testNormalContainerAllocationWhenDNSUnavailable() throws Exception {
  MockRM rm1 = new MockRM(conf);
  rm1.start();
  try {
    // The NM registers with a hostname that cannot be resolved.
    MockNM nm1 = rm1.registerNode("unknownhost:1234", 8000);
    RMApp app1 = rm1.submitApp(200);
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
    // Request one 1GB container and wait for the scheduler to allocate it.
    am1.allocate("127.0.0.1", 1024, 1, new ArrayList());
    ContainerId containerId2 = ContainerId.newInstance(am1.getApplicationAttemptId(), 2);
    rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED);
    try {
      // With IP-based token service, the unresolvable host must keep the
      // allocated container from being pulled by the AM.
      SecurityUtilTestHelper.setTokenServiceUseIp(true);
      List withheld = am1.allocate(new ArrayList(), new ArrayList()).getAllocatedContainers();
      Assert.assertEquals(0, withheld.size());
    } finally {
      // Restore the global token-service mode even if the assertion fails, so
      // other tests in the JVM are not affected.
      SecurityUtilTestHelper.setTokenServiceUseIp(false);
    }
    // With hostname-based token service restored, the container is delivered.
    List containers = am1.allocate(new ArrayList(), new ArrayList()).getAllocatedContainers();
    Assert.assertEquals(1, containers.size());
  } finally {
    // Stop the RM on all paths to avoid leaking it on failure.
    rm1.stop();
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that a container token is not created at allocation time but is
 * attached when the AM actually pulls the allocated container.
 */
@Test public void testContainerTokenGeneratedOnPullRequest() throws Exception {
  MockRM rm1 = new MockRM(conf);
  rm1.start();
  try {
    MockNM nm1 = rm1.registerNode("127.0.0.1:1234", 8000);
    RMApp app1 = rm1.submitApp(200);
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
    // Request one 1GB container and wait for it to be allocated.
    am1.allocate("127.0.0.1", 1024, 1, new ArrayList());
    ContainerId containerId2 = ContainerId.newInstance(am1.getApplicationAttemptId(), 2);
    rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED);
    // Before the AM pulls it, the RM-side container carries no token yet.
    RMContainer container = rm1.getResourceScheduler().getRMContainer(containerId2);
    Assert.assertEquals(containerId2, container.getContainerId());
    Assert.assertNull(container.getContainer().getContainerToken());
    // Pulling the container via allocate() must attach the token.
    List containers = am1.allocate(new ArrayList(), new ArrayList()).getAllocatedContainers();
    Assert.assertEquals(containerId2, containers.get(0).getId());
    Assert.assertNotNull(containers.get(0).getContainerToken());
  } finally {
    // Stop the RM even when an assertion fails, to avoid leaking it.
    rm1.stop();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * After a single 1GB allocation on queue B, the queue's available-MB metric
 * must equal its share of the node minus the allocated gigabyte.
 */
@Test public void testSingleQueueOneUserMetrics() throws Exception {
  LeafQueue queue = stubLeafQueue((LeafQueue) queues.get(B));
  final String user_0 = "user_0";
  // Submit two application attempts for the same user.
  final ApplicationAttemptId attemptId0 = TestUtils.getMockApplicationAttemptId(0, 0);
  FiCaSchedulerApp schedApp0 = new FiCaSchedulerApp(attemptId0, user_0, queue,
      mock(ActiveUsersManager.class), rmContext);
  queue.submitApplicationAttempt(schedApp0, user_0);
  final ApplicationAttemptId attemptId1 = TestUtils.getMockApplicationAttemptId(1, 0);
  FiCaSchedulerApp schedApp1 = new FiCaSchedulerApp(attemptId1, user_0, queue,
      mock(ActiveUsersManager.class), rmContext);
  queue.submitApplicationAttempt(schedApp1, user_0);
  // The cluster is one 8GB / 16-vcore node.
  String host_0 = "127.0.0.1";
  FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
  final int numNodes = 1;
  Resource clusterResource = Resources.createResource(numNodes * (8 * GB), numNodes * 16);
  when(csContext.getNumClusterNodes()).thenReturn(numNodes);
  // First attempt asks for three 1GB containers; schedule one pass.
  Priority priority = TestUtils.createMockPriority(1);
  schedApp0.updateResourceRequests(Collections.singletonList(
      TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 3, true, priority,
          recordFactory)));
  queue.assignContainers(clusterResource, node_0);
  // Available MB = the queue's capacity share of the node, minus the 1GB
  // container just allocated.
  assertEquals(
      (int) (node_0.getTotalResource().getMemory() * queue.getCapacity()) - (1 * GB),
      queue.getMetrics().getAvailableMB());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Walks a single user's two apps through allocation on one node, exercising
// the user limit, the user-limit factor, the queue max-capacity cap, and the
// release path, checking queue/app usage and metrics after every step.
@Test public void testSingleQueueWithOneUser() throws Exception {
// Queue under test; lift max capacity so the cap only matters once it is
// lowered explicitly near the end.
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
a.setMaxCapacity(1.0f);
// Two application attempts for the same user.
final String user_0="user_0";
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext);
a.submitApplicationAttempt(app_0,user_0);
final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0);
FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_0,a,mock(ActiveUsersManager.class),rmContext);
a.submitApplicationAttempt(app_1,user_0);
// Single 8GB node forms the whole cluster.
String host_0="127.0.0.1";
FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,8 * GB);
final int numNodes=1;
Resource clusterResource=Resources.createResource(numNodes * (8 * GB),numNodes * 16);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
// app_0 asks for 3x1GB, app_1 for 2x1GB, same priority.
Priority priority=TestUtils.createMockPriority(1);
app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,3,true,priority,recordFactory)));
app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority,recordFactory)));
// 1st scheduling pass: app_0 gets a 1GB container.
a.assignContainers(clusterResource,node_0);
assertEquals(1 * GB,a.getUsedResources().getMemory());
assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(1 * GB,a.getMetrics().getAllocatedMB());
assertEquals(0 * GB,a.getMetrics().getAvailableMB());
// 2nd pass: app_0 gets its second 1GB container.
a.assignContainers(clusterResource,node_0);
assertEquals(2 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(2 * GB,a.getMetrics().getAllocatedMB());
// 3rd pass: nothing is allocated — presumably the user limit is reached,
// since raising the user-limit factor below unblocks allocation.
a.assignContainers(clusterResource,node_0);
assertEquals(2 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(2 * GB,a.getMetrics().getAllocatedMB());
// With a higher user-limit factor, app_0 gets its third container.
a.setUserLimitFactor(10);
a.assignContainers(clusterResource,node_0);
assertEquals(3 * GB,a.getUsedResources().getMemory());
assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(3 * GB,a.getMetrics().getAllocatedMB());
// app_0 is now satisfied; the next allocation goes to app_1.
a.assignContainers(clusterResource,node_0);
assertEquals(4 * GB,a.getUsedResources().getMemory());
assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(1 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(4 * GB,a.getMetrics().getAllocatedMB());
// Dropping max capacity below current usage blocks further allocation;
// usage stays at 4GB.
a.setMaxCapacity(0.5f);
a.assignContainers(clusterResource,node_0);
assertEquals(4 * GB,a.getUsedResources().getMemory());
assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(1 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(4 * GB,a.getMetrics().getAllocatedMB());
// Kill all of app_0's containers; only app_1's 1GB remains.
for ( RMContainer rmContainer : app_0.getLiveContainers()) {
a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
}
assertEquals(1 * GB,a.getUsedResources().getMemory());
assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(1 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(1 * GB,a.getMetrics().getAllocatedMB());
// Kill app_1's containers too; the queue returns to empty.
for ( RMContainer rmContainer : app_1.getLiveContainers()) {
a.completedContainer(clusterResource,app_1,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
}
assertEquals(0 * GB,a.getUsedResources().getMemory());
assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(0 * GB,a.getMetrics().getAllocatedMB());
// With nothing allocated, available MB equals the queue's capacity share
// of the node.
assertEquals((int)(a.getCapacity() * node_0.getTotalResource().getMemory()),a.getMetrics().getAvailableMB());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * With three attempts submitted to queue E, only two are active and one is
 * pending; growing the cluster resource must activate the pending attempt.
 */
@Test(timeout=30000) public void testActivateApplicationByUpdatingClusterResource() throws Exception {
  LeafQueue queueE = stubLeafQueue((LeafQueue) queues.get(E));
  final String user_e = "user_e";
  // Submit three attempts (ids 0..2) for the same user.
  for (int attempt = 0; attempt < 3; attempt++) {
    ApplicationAttemptId attemptId = TestUtils.getMockApplicationAttemptId(attempt, 0);
    FiCaSchedulerApp schedulerApp = new FiCaSchedulerApp(attemptId, user_e, queueE,
        mock(ActiveUsersManager.class), rmContext);
    queueE.submitApplicationAttempt(schedulerApp, user_e);
  }
  // Only two fit under the current activation limit; the third stays pending.
  assertEquals(2, queueE.activeApplications.size());
  assertEquals(1, queueE.pendingApplications.size());
  // A larger cluster raises the limit and activates the pending attempt.
  queueE.updateClusterResource(Resources.createResource(200 * 16 * GB, 100 * 32));
  assertEquals(3, queueE.activeApplications.size());
  assertEquals(0, queueE.pendingApplications.size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Changing the node-locality delay in the configuration and reinitializing
 * the queue hierarchy must be reflected on the existing queue object.
 */
@Test(timeout=30000) public void testNodeLocalityAfterQueueRefresh() throws Exception {
  LeafQueue queueE = stubLeafQueue((LeafQueue) queues.get(E));
  // Value from the initial configuration.
  assertEquals(40, queueE.getNodeLocalityDelay());
  // Raise the delay, then re-parse and reinitialize the hierarchy.
  csConf.setInt(CapacitySchedulerConfiguration.NODE_LOCALITY_DELAY, 60);
  Map refreshedQueues = new HashMap();
  CSQueue refreshedRoot = CapacityScheduler.parseQueue(csContext, csConf, null,
      CapacitySchedulerConfiguration.ROOT, refreshedQueues, queues, TestUtils.spyHook);
  queues = refreshedQueues;
  root.reinitialize(refreshedRoot, cs.getClusterResource());
  // The refreshed delay is visible on the pre-existing queue instance.
  assertEquals(60, queueE.getNodeLocalityDelay());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * The max-AM-resource percentage must survive queue refresh and cluster
 * resource updates, with the derived active-application limit scaling
 * accordingly (160 -> 320 -> 400).
 */
@Test public void testMaxAMResourcePerQueuePercentAfterQueueRefresh() throws Exception {
  CapacitySchedulerConfiguration schedConf = new CapacitySchedulerConfiguration();
  Resource initialClusterResource = Resources.createResource(100 * 16 * GB, 100 * 32);
  CapacitySchedulerContext context = mockCSContext(schedConf, initialClusterResource);
  // Initially 10% of the queue may be consumed by application masters.
  schedConf.setFloat(
      CapacitySchedulerConfiguration.MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT, 0.1f);
  ParentQueue rootQueue =
      new ParentQueue(context, CapacitySchedulerConfiguration.ROOT, null, null);
  schedConf.setCapacity(CapacitySchedulerConfiguration.ROOT + "." + A, 80);
  LeafQueue leafA = new LeafQueue(context, A, rootQueue, null);
  assertEquals(0.1f, leafA.getMaxAMResourcePerQueuePercent(), 1e-3f);
  assertEquals(160, leafA.getMaximumActiveApplications());
  // Refresh the queue with 20%: both the percentage and the limit double.
  schedConf.setFloat(
      CapacitySchedulerConfiguration.MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT, 0.2f);
  LeafQueue refreshedA = new LeafQueue(context, A, rootQueue, null);
  leafA.reinitialize(refreshedA, initialClusterResource);
  assertEquals(0.2f, leafA.getMaxAMResourcePerQueuePercent(), 1e-3f);
  assertEquals(320, leafA.getMaximumActiveApplications());
  // Growing the cluster scales the active-application limit proportionally.
  Resource grownClusterResource = Resources.createResource(100 * 20 * GB, 100 * 32);
  leafA.updateClusterResource(grownClusterResource);
  assertEquals(400, leafA.getMaximumActiveApplications());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Exercises the interaction of relaxLocality flags and blacklist updates:
// allocation must be withheld while the requested host/rack is unreachable
// or blacklisted, and proceed once the constraints are lifted.
@Test public void testLocalityConstraints() throws Exception {
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
String user_0="user_0";
// Two spied attempts so Mockito can verify allocate() invocations.
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
FiCaSchedulerApp app_0=spy(new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext));
a.submitApplicationAttempt(app_0,user_0);
final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0);
FiCaSchedulerApp app_1=spy(new FiCaSchedulerApp(appAttemptId_1,user_0,a,mock(ActiveUsersManager.class),rmContext));
a.submitApplicationAttempt(app_1,user_0);
// host_0_0 has no node object — it exists only as a request/blacklist name.
String host_0_0="127.0.0.1";
String rack_0="rack_0";
String host_0_1="127.0.0.2";
FiCaSchedulerNode node_0_1=TestUtils.getMockNode(host_0_1,rack_0,0,8 * GB);
String host_1_0="127.0.0.3";
String rack_1="rack_1";
FiCaSchedulerNode node_1_0=TestUtils.getMockNode(host_1_0,rack_1,0,8 * GB);
String host_1_1="127.0.0.4";
FiCaSchedulerNode node_1_1=TestUtils.getMockNode(host_1_1,rack_1,0,8 * GB);
final int numNodes=4;
Resource clusterResource=Resources.createResource(numNodes * (8 * GB),numNodes * 1);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
// Node-local requests on host_0_0/host_1_0 with relaxLocality=true, but the
// rack_1 and ANY requests have relaxLocality=false; host_0_0 is blacklisted.
Priority priority=TestUtils.createMockPriority(1);
List app_0_requests_0=new ArrayList();
app_0_requests_0.add(TestUtils.createResourceRequest(host_0_0,1 * GB,1,true,priority,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(host_1_0,1 * GB,1,true,priority,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_1,1 * GB,1,false,priority,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,1,false,priority,recordFactory));
app_0.updateResourceRequests(app_0_requests_0);
app_0.updateBlacklist(Collections.singletonList(host_0_0),null);
app_0_requests_0.clear();
// No allocation expected on node_0_1 (its host was never requested and
// locality may not be relaxed).
a.assignContainers(clusterResource,node_0_1);
verify(app_0,never()).allocate(any(NodeType.class),eq(node_0_1),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority));
a.assignContainers(clusterResource,node_1_1);
// NOTE(review): this verifies node_0_1 although the assignment above was on
// node_1_1 — possibly intended eq(node_1_1); confirm against upstream.
verify(app_0,never()).allocate(any(NodeType.class),eq(node_0_1),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority));
// Relax locality on rack_1, but blacklist host_1_1: still no allocation
// when scheduling on node_1_1.
app_0_requests_0.add(TestUtils.createResourceRequest(rack_1,1 * GB,1,true,priority,recordFactory));
app_0.updateResourceRequests(app_0_requests_0);
app_0.updateBlacklist(Collections.singletonList(host_1_1),null);
app_0_requests_0.clear();
a.assignContainers(clusterResource,node_1_1);
verify(app_0,never()).allocate(any(NodeType.class),eq(node_1_1),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority));
// Swap the blacklist: add rack_1, remove host_1_1 — the rack blacklist
// still blocks node_1_1.
app_0.updateResourceRequests(app_0_requests_0);
app_0.updateBlacklist(Collections.singletonList(rack_1),Collections.singletonList(host_1_1));
app_0_requests_0.clear();
a.assignContainers(clusterResource,node_1_1);
verify(app_0,never()).allocate(any(NodeType.class),eq(node_1_1),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority));
// Remove rack_1 from the blacklist; still no RACK_LOCAL allocation on
// node_1_1, and one resource remains outstanding.
app_0.updateResourceRequests(app_0_requests_0);
app_0.updateBlacklist(null,Collections.singletonList(rack_1));
app_0_requests_0.clear();
a.assignContainers(clusterResource,node_1_1);
verify(app_0,never()).allocate(eq(NodeType.RACK_LOCAL),eq(node_1_1),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority));
assertEquals(1,app_0.getTotalRequiredResources(priority));
// Re-add non-relaxed rack/ANY requests, then schedule on node_1_0 — the
// node-local request on host_1_0 is finally satisfied.
app_0_requests_0.add(TestUtils.createResourceRequest(rack_1,1 * GB,1,false,priority,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,1,false,priority,recordFactory));
app_0.updateResourceRequests(app_0_requests_0);
app_0_requests_0.clear();
a.assignContainers(clusterResource,node_1_0);
verify(app_0).allocate(eq(NodeType.NODE_LOCAL),eq(node_1_0),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority));
assertEquals(0,app_0.getTotalRequiredResources(priority));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// A 4GB request reserves space on node_0 behind app_0's 2GB container, is
// also allocated on node_1 once the locality delay is disabled, and the
// node_0 reservation converts to a real allocation after app_0's container
// is killed.
@Test public void testStolenReservedContainer() throws Exception {
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
a.setMaxCapacity(1.0f);
// Two users, one app each.
final String user_0="user_0";
final String user_1="user_1";
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext);
a.submitApplicationAttempt(app_0,user_0);
final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0);
FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_1,a,mock(ActiveUsersManager.class),rmContext);
a.submitApplicationAttempt(app_1,user_1);
// Two 4GB nodes (cluster resource is sized for 3 nodes of 4GB).
String host_0="127.0.0.1";
FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,4 * GB);
String host_1="127.0.0.2";
FiCaSchedulerNode node_1=TestUtils.getMockNode(host_1,DEFAULT_RACK,0,4 * GB);
final int numNodes=3;
Resource clusterResource=Resources.createResource(numNodes * (4 * GB),numNodes * 16);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
// app_0 wants one 2GB anywhere; app_1 wants two 4GB, node-local on host_0.
Priority priority=TestUtils.createMockPriority(1);
app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,2 * GB,1,true,priority,recordFactory)));
ArrayList appRequests_1=new ArrayList(4);
appRequests_1.add(TestUtils.createResourceRequest(host_0,4 * GB,1,true,priority,recordFactory));
appRequests_1.add(TestUtils.createResourceRequest(DEFAULT_RACK,4 * GB,1,true,priority,recordFactory));
appRequests_1.add(TestUtils.createResourceRequest(ResourceRequest.ANY,4 * GB,2,true,priority,recordFactory));
app_1.updateResourceRequests(appRequests_1);
// 1st pass: app_0's 2GB lands on node_0.
a.assignContainers(clusterResource,node_0);
assertEquals(2 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(2 * GB,a.getMetrics().getAllocatedMB());
assertEquals(0 * GB,a.getMetrics().getAvailableMB());
// 2nd pass: node_0 has only 2GB free, so app_1's 4GB becomes a
// reservation (used = 2GB allocated + 4GB reserved).
a.assignContainers(clusterResource,node_0);
assertEquals(6 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(2 * GB,node_0.getUsedResource().getMemory());
assertEquals(4 * GB,a.getMetrics().getReservedMB());
assertEquals(2 * GB,a.getMetrics().getAllocatedMB());
// Disable the node-locality delay on the stubbed queue so app_1's second
// 4GB can be allocated on node_1 despite the host_0 preference.
doReturn(-1).when(a).getNodeLocalityDelay();
a.assignContainers(clusterResource,node_1);
assertEquals(10 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(4 * GB,node_1.getUsedResource().getMemory());
assertEquals(4 * GB,a.getMetrics().getReservedMB());
assertEquals(6 * GB,a.getMetrics().getAllocatedMB());
// Kill app_0's container on node_0; the freed space lets the reservation
// on node_0 convert into a real allocation on the next pass.
RMContainer rmContainer=app_0.getLiveContainers().iterator().next();
a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
a.assignContainers(clusterResource,node_0);
assertEquals(8 * GB,a.getUsedResources().getMemory());
assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(8 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(4 * GB,node_0.getUsedResource().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(8 * GB,a.getMetrics().getAllocatedMB());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Three users (four apps) share one queue on a single 8GB node; the test
// steps through user limits, the user-limit factor, and the max-capacity cap,
// then releases each app's containers and checks the accounting at each step.
@Test public void testSingleQueueWithMultipleUsers() throws Exception {
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
a.setMaxCapacity(1.0f);
final String user_0="user_0";
final String user_1="user_1";
final String user_2="user_2";
// user_0 owns app_0 and app_1; user_1 owns app_2; user_2 owns app_3.
// The real ActiveUsersManager is used so user-limit math sees all users.
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,a.getActiveUsersManager(),rmContext);
a.submitApplicationAttempt(app_0,user_0);
final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0);
FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_0,a,a.getActiveUsersManager(),rmContext);
a.submitApplicationAttempt(app_1,user_0);
final ApplicationAttemptId appAttemptId_2=TestUtils.getMockApplicationAttemptId(2,0);
FiCaSchedulerApp app_2=new FiCaSchedulerApp(appAttemptId_2,user_1,a,a.getActiveUsersManager(),rmContext);
a.submitApplicationAttempt(app_2,user_1);
final ApplicationAttemptId appAttemptId_3=TestUtils.getMockApplicationAttemptId(3,0);
FiCaSchedulerApp app_3=new FiCaSchedulerApp(appAttemptId_3,user_2,a,a.getActiveUsersManager(),rmContext);
a.submitApplicationAttempt(app_3,user_2);
// Single 8GB node.
String host_0="127.0.0.1";
FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,8 * GB);
final int numNodes=1;
Resource clusterResource=Resources.createResource(numNodes * (8 * GB),numNodes * 16);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
// Only user_0's apps request resources at first.
Priority priority=TestUtils.createMockPriority(1);
app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,10,true,priority,recordFactory)));
app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,10,true,priority,recordFactory)));
// Two passes: both 1GB containers go to app_0.
a.assignContainers(clusterResource,node_0);
assertEquals(1 * GB,a.getUsedResources().getMemory());
assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
a.assignContainers(clusterResource,node_0);
assertEquals(2 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
// With a 25% user limit, user_0 can get nothing further.
a.setUserLimit(25);
a.assignContainers(clusterResource,node_0);
assertEquals(2 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
// user_1 and user_2 start requesting; a high user-limit factor lets the
// queue hand out more per user.
app_2.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,3 * GB,1,true,priority,recordFactory)));
app_3.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority,recordFactory)));
a.setUserLimitFactor(10);
// app_2 (user_1) gets its 3GB container.
a.assignContainers(clusterResource,node_0);
assertEquals(5 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(3 * GB,app_2.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_3.getCurrentConsumption().getMemory());
// app_0 (user_0) gets a third 1GB container.
a.assignContainers(clusterResource,node_0);
assertEquals(6 * GB,a.getUsedResources().getMemory());
assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(3 * GB,app_2.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_3.getCurrentConsumption().getMemory());
// Capping max capacity at 50% (4GB) blocks allocation: usage stays 6GB.
a.setMaxCapacity(0.5f);
a.assignContainers(clusterResource,node_0);
assertEquals(6 * GB,a.getUsedResources().getMemory());
assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(3 * GB,app_2.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_3.getCurrentConsumption().getMemory());
// Restore capacity and set the factor to 1: user_2 (app_3) now gets its
// two 1GB containers over the next two passes.
a.setMaxCapacity(1.0f);
a.setUserLimitFactor(1);
a.assignContainers(clusterResource,node_0);
assertEquals(7 * GB,a.getUsedResources().getMemory());
assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(3 * GB,app_2.getCurrentConsumption().getMemory());
assertEquals(1 * GB,app_3.getCurrentConsumption().getMemory());
a.assignContainers(clusterResource,node_0);
assertEquals(8 * GB,a.getUsedResources().getMemory());
assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(3 * GB,app_2.getCurrentConsumption().getMemory());
assertEquals(2 * GB,app_3.getCurrentConsumption().getMemory());
// Release app_0's containers: 3GB comes back.
for ( RMContainer rmContainer : app_0.getLiveContainers()) {
a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
}
assertEquals(5 * GB,a.getUsedResources().getMemory());
assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(3 * GB,app_2.getCurrentConsumption().getMemory());
assertEquals(2 * GB,app_3.getCurrentConsumption().getMemory());
// Release app_2's containers: another 3GB comes back.
for ( RMContainer rmContainer : app_2.getLiveContainers()) {
a.completedContainer(clusterResource,app_2,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
}
assertEquals(2 * GB,a.getUsedResources().getMemory());
assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_2.getCurrentConsumption().getMemory());
assertEquals(2 * GB,app_3.getCurrentConsumption().getMemory());
// Release app_3's containers: the queue is empty again.
for ( RMContainer rmContainer : app_3.getLiveContainers()) {
a.completedContainer(clusterResource,app_3,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
}
assertEquals(0 * GB,a.getUsedResources().getMemory());
assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_2.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_3.getCurrentConsumption().getMemory());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * With a 50% user limit and factor 2, a single active user's two apps are
 * allocated 2GB + 1GB + 1GB across three scheduling passes; the idle second
 * user never counts as active.
 */
@Test public void testUserLimits() throws Exception {
  LeafQueue leaf = stubLeafQueue((LeafQueue) queues.get(A));
  leaf.setMaxCapacity(1.0f);
  final String user_0 = "user_0";
  final String user_1 = "user_1";
  // Two attempts for user_0 and one for user_1, all backed by the queue's
  // real ActiveUsersManager so active-user counting works.
  final ApplicationAttemptId attemptId0 = TestUtils.getMockApplicationAttemptId(0, 0);
  FiCaSchedulerApp schedApp0 = new FiCaSchedulerApp(attemptId0, user_0, leaf,
      leaf.getActiveUsersManager(), rmContext);
  leaf.submitApplicationAttempt(schedApp0, user_0);
  final ApplicationAttemptId attemptId1 = TestUtils.getMockApplicationAttemptId(1, 0);
  FiCaSchedulerApp schedApp1 = new FiCaSchedulerApp(attemptId1, user_0, leaf,
      leaf.getActiveUsersManager(), rmContext);
  leaf.submitApplicationAttempt(schedApp1, user_0);
  final ApplicationAttemptId attemptId2 = TestUtils.getMockApplicationAttemptId(2, 0);
  FiCaSchedulerApp schedApp2 = new FiCaSchedulerApp(attemptId2, user_1, leaf,
      leaf.getActiveUsersManager(), rmContext);
  leaf.submitApplicationAttempt(schedApp2, user_1);
  // Two 8GB nodes.
  String host_0 = "127.0.0.1";
  FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
  String host_1 = "127.0.0.2";
  FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8 * GB);
  final int numNodes = 2;
  Resource clusterResource = Resources.createResource(numNodes * (8 * GB), numNodes * 16);
  when(csContext.getNumClusterNodes()).thenReturn(numNodes);
  // Only user_0's apps request resources; user_1's app stays idle.
  Priority priority = TestUtils.createMockPriority(1);
  schedApp0.updateResourceRequests(Collections.singletonList(
      TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, priority,
          recordFactory)));
  schedApp1.updateResourceRequests(Collections.singletonList(
      TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority,
          recordFactory)));
  leaf.setUserLimit(50);
  leaf.setUserLimitFactor(2);
  assertEquals("There should only be 1 active user!", 1,
      leaf.getActiveUsersManager().getNumActiveUsers());
  // Pass 1: app_0's single 2GB container is allocated.
  leaf.assignContainers(clusterResource, node_0);
  assertEquals(2 * GB, leaf.getUsedResources().getMemory());
  assertEquals(2 * GB, schedApp0.getCurrentConsumption().getMemory());
  assertEquals(0 * GB, schedApp1.getCurrentConsumption().getMemory());
  // Pass 2: app_1 gets the first of its two 1GB containers.
  leaf.assignContainers(clusterResource, node_0);
  assertEquals(3 * GB, leaf.getUsedResources().getMemory());
  assertEquals(2 * GB, schedApp0.getCurrentConsumption().getMemory());
  assertEquals(1 * GB, schedApp1.getCurrentConsumption().getMemory());
  // Pass 3 on the second node: app_1 gets its remaining 1GB container.
  leaf.assignContainers(clusterResource, node_1);
  assertEquals(4 * GB, leaf.getUsedResources().getMemory());
  assertEquals(2 * GB, schedApp0.getCurrentConsumption().getMemory());
  assertEquals(2 * GB, schedApp1.getCurrentConsumption().getMemory());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Submit-application ACLs: granted on leaf queues A and B, denied on root,
 * parent C and its child C1 — both via direct checks and via the
 * user-visible ACL info.
 */
@Test public void testInheritedQueueAcls() throws IOException {
  UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  LeafQueue queueA = stubLeafQueue((LeafQueue) queues.get(A));
  LeafQueue queueB = stubLeafQueue((LeafQueue) queues.get(B));
  ParentQueue queueC = (ParentQueue) queues.get(C);
  LeafQueue queueC1 = stubLeafQueue((LeafQueue) queues.get(C1));
  // Direct hasAccess checks.
  assertFalse(root.hasAccess(QueueACL.SUBMIT_APPLICATIONS, currentUser));
  assertTrue(queueA.hasAccess(QueueACL.SUBMIT_APPLICATIONS, currentUser));
  assertTrue(queueB.hasAccess(QueueACL.SUBMIT_APPLICATIONS, currentUser));
  assertFalse(queueC.hasAccess(QueueACL.SUBMIT_APPLICATIONS, currentUser));
  assertFalse(queueC1.hasAccess(QueueACL.SUBMIT_APPLICATIONS, currentUser));
  // The user-visible ACL info must agree with the direct checks.
  assertTrue(hasQueueACL(queueA.getQueueUserAclInfo(currentUser),
      QueueACL.SUBMIT_APPLICATIONS));
  assertTrue(hasQueueACL(queueB.getQueueUserAclInfo(currentUser),
      QueueACL.SUBMIT_APPLICATIONS));
  assertFalse(hasQueueACL(queueC.getQueueUserAclInfo(currentUser),
      QueueACL.SUBMIT_APPLICATIONS));
  assertFalse(hasQueueACL(queueC1.getQueueUserAclInfo(currentUser),
      QueueACL.SUBMIT_APPLICATIONS));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Checks the configured capacity / absolute capacity / maximum capacity /
 * absolute maximum capacity of queues A, B and C after initialization.
 */
@Test public void testInitializeQueue() throws Exception {
  final float epsilon = 1e-5f;
  // Leaf queue A: 8.5% capacity, capped at 20%.
  LeafQueue queueA = stubLeafQueue((LeafQueue) queues.get(A));
  assertEquals(0.085, queueA.getCapacity(), epsilon);
  assertEquals(0.085, queueA.getAbsoluteCapacity(), epsilon);
  assertEquals(0.2, queueA.getMaximumCapacity(), epsilon);
  assertEquals(0.2, queueA.getAbsoluteMaximumCapacity(), epsilon);
  // Leaf queue B: 80% capacity, capped at 99%.
  LeafQueue queueB = stubLeafQueue((LeafQueue) queues.get(B));
  assertEquals(0.80, queueB.getCapacity(), epsilon);
  assertEquals(0.80, queueB.getAbsoluteCapacity(), epsilon);
  assertEquals(0.99, queueB.getMaximumCapacity(), epsilon);
  assertEquals(0.99, queueB.getAbsoluteMaximumCapacity(), epsilon);
  // Parent queue C: 1.5% capacity, capped at 10%.
  ParentQueue queueC = (ParentQueue) queues.get(C);
  assertEquals(0.015, queueC.getCapacity(), epsilon);
  assertEquals(0.015, queueC.getAbsoluteCapacity(), epsilon);
  assertEquals(0.1, queueC.getMaximumCapacity(), epsilon);
  assertEquals(0.1, queueC.getAbsoluteMaximumCapacity(), epsilon);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Checks per-app headroom while the queue is capped: headroom stays 0 under
// the cap, and app_2 regains 1GB of headroom once user_0 withdraws its
// outstanding request.
@Test public void testHeadroomWithMaxCap() throws Exception {
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
a.setMaxCapacity(1.0f);
// user_0 owns app_0/app_1; user_1 owns app_2. The queue's real
// ActiveUsersManager is used so the active-user count is meaningful.
final String user_0="user_0";
final String user_1="user_1";
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,a.getActiveUsersManager(),rmContext);
a.submitApplicationAttempt(app_0,user_0);
final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0);
FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_0,a,a.getActiveUsersManager(),rmContext);
a.submitApplicationAttempt(app_1,user_0);
final ApplicationAttemptId appAttemptId_2=TestUtils.getMockApplicationAttemptId(2,0);
FiCaSchedulerApp app_2=new FiCaSchedulerApp(appAttemptId_2,user_1,a,a.getActiveUsersManager(),rmContext);
a.submitApplicationAttempt(app_2,user_1);
// Two 8GB nodes; note the cluster resource carries only 1 vcore.
String host_0="127.0.0.1";
FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,8 * GB);
String host_1="127.0.0.2";
FiCaSchedulerNode node_1=TestUtils.getMockNode(host_1,DEFAULT_RACK,0,8 * GB);
final int numNodes=2;
Resource clusterResource=Resources.createResource(numNodes * (8 * GB),1);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
// user_0's apps request 2GB and 2x1GB; user_1 is idle for now, so only
// one user is active.
Priority priority=TestUtils.createMockPriority(1);
app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,2 * GB,1,true,priority,recordFactory)));
app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority,recordFactory)));
a.setUserLimit(50);
a.setUserLimitFactor(2);
assertEquals("There should only be 1 active user!",1,a.getActiveUsersManager().getNumActiveUsers());
// Pass 1: app_0 gets its 2GB; both apps see zero headroom.
a.assignContainers(clusterResource,node_0);
assertEquals(2 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_0.getHeadroom().getMemory());
assertEquals(0 * GB,app_1.getHeadroom().getMemory());
// Pass 2: app_1 gets 1GB; headroom still zero.
a.assignContainers(clusterResource,node_0);
assertEquals(3 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(1 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_0.getHeadroom().getMemory());
assertEquals(0 * GB,app_1.getHeadroom().getMemory());
// Cap the queue at 10% and have user_1 (app_2) request 1GB: two users are
// now active and no further allocation happens under the cap.
a.setMaxCapacity(.1f);
app_2.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,1,true,priority,recordFactory)));
assertEquals(2,a.getActiveUsersManager().getNumActiveUsers());
a.assignContainers(clusterResource,node_1);
assertEquals(3 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(1 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_2.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_0.getHeadroom().getMemory());
assertEquals(0 * GB,app_1.getHeadroom().getMemory());
LOG.info("here");
// app_1 withdraws its outstanding request (numContainers=0): user_0 goes
// inactive, leaving one active user, and app_2 regains 1GB of headroom.
app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,0,true,priority,recordFactory)));
assertEquals(1,a.getActiveUsersManager().getNumActiveUsers());
a.assignContainers(clusterResource,node_1);
assertEquals(1 * GB,app_2.getHeadroom().getMemory());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Exercises delay scheduling in LeafQueue. A single (spied) app requests 1GB
 * containers node-local to host_0/host_1 plus 3 at ANY. Repeated offers of the
 * unrelated node_2 only accumulate scheduling opportunities until they exceed
 * the cluster size, at which point an OFF_SWITCH container is allocated;
 * offers on a requested host allocate NODE_LOCAL and reset the opportunity
 * count. Finally, with getNodeLocalityDelay() stubbed to 1, a second offer of
 * a node on rack_1 yields a RACK_LOCAL allocation.
 *
 * Fix: use typed List&lt;ResourceRequest&gt; instead of raw List/ArrayList
 * (raw types compile only with unchecked warnings), and drop the redundant
 * null-initialization of {@code assignment}.
 */
@Test public void testLocalityScheduling() throws Exception {
  LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
  String user_0 = "user_0";
  final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
  // Spy so allocate() invocations can be verified per node/type.
  FiCaSchedulerApp app_0 = spy(new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), rmContext));
  a.submitApplicationAttempt(app_0, user_0);
  // Three 8GB nodes, each on its own rack.
  String host_0 = "127.0.0.1";
  String rack_0 = "rack_0";
  FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, rack_0, 0, 8 * GB);
  String host_1 = "127.0.0.2";
  String rack_1 = "rack_1";
  FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, rack_1, 0, 8 * GB);
  String host_2 = "127.0.0.3";
  String rack_2 = "rack_2";
  FiCaSchedulerNode node_2 = TestUtils.getMockNode(host_2, rack_2, 0, 8 * GB);
  final int numNodes = 3;
  Resource clusterResource = Resources.createResource(numNodes * (8 * GB), numNodes * 16);
  when(csContext.getNumClusterNodes()).thenReturn(numNodes);
  Priority priority = TestUtils.createMockPriority(1);
  // Node-local preferences for host_0/host_1 plus 3 containers at ANY.
  List<ResourceRequest> app_0_requests_0 = new ArrayList<ResourceRequest>();
  app_0_requests_0.add(TestUtils.createResourceRequest(host_0, 1 * GB, 1, true, priority, recordFactory));
  app_0_requests_0.add(TestUtils.createResourceRequest(rack_0, 1 * GB, 1, true, priority, recordFactory));
  app_0_requests_0.add(TestUtils.createResourceRequest(host_1, 1 * GB, 1, true, priority, recordFactory));
  app_0_requests_0.add(TestUtils.createResourceRequest(rack_1, 1 * GB, 1, true, priority, recordFactory));
  app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 3, true, priority, recordFactory));
  app_0.updateResourceRequests(app_0_requests_0);
  // Offers 1-3 on the non-requested node_2: no allocation, opportunities grow.
  CSAssignment assignment = a.assignContainers(clusterResource, node_2);
  verify(app_0, never()).allocate(any(NodeType.class), eq(node_2), any(Priority.class), any(ResourceRequest.class), any(Container.class));
  assertEquals(1, app_0.getSchedulingOpportunities(priority));
  assertEquals(3, app_0.getTotalRequiredResources(priority));
  assertEquals(NodeType.NODE_LOCAL, assignment.getType());
  assignment = a.assignContainers(clusterResource, node_2);
  verify(app_0, never()).allocate(any(NodeType.class), eq(node_2), any(Priority.class), any(ResourceRequest.class), any(Container.class));
  assertEquals(2, app_0.getSchedulingOpportunities(priority));
  assertEquals(3, app_0.getTotalRequiredResources(priority));
  assertEquals(NodeType.NODE_LOCAL, assignment.getType());
  assignment = a.assignContainers(clusterResource, node_2);
  verify(app_0, never()).allocate(any(NodeType.class), eq(node_2), any(Priority.class), any(ResourceRequest.class), any(Container.class));
  assertEquals(3, app_0.getSchedulingOpportunities(priority));
  assertEquals(3, app_0.getTotalRequiredResources(priority));
  assertEquals(NodeType.NODE_LOCAL, assignment.getType());
  // Offer 4: opportunities exceed cluster size -> OFF_SWITCH fallback.
  assignment = a.assignContainers(clusterResource, node_2);
  verify(app_0).allocate(eq(NodeType.OFF_SWITCH), eq(node_2), any(Priority.class), any(ResourceRequest.class), any(Container.class));
  assertEquals(4, app_0.getSchedulingOpportunities(priority));
  assertEquals(2, app_0.getTotalRequiredResources(priority));
  assertEquals(NodeType.OFF_SWITCH, assignment.getType());
  // Requested hosts allocate NODE_LOCAL and reset the opportunity count.
  assignment = a.assignContainers(clusterResource, node_0);
  verify(app_0).allocate(eq(NodeType.NODE_LOCAL), eq(node_0), any(Priority.class), any(ResourceRequest.class), any(Container.class));
  assertEquals(0, app_0.getSchedulingOpportunities(priority));
  assertEquals(1, app_0.getTotalRequiredResources(priority));
  assertEquals(NodeType.NODE_LOCAL, assignment.getType());
  assignment = a.assignContainers(clusterResource, node_1);
  verify(app_0).allocate(eq(NodeType.NODE_LOCAL), eq(node_1), any(Priority.class), any(ResourceRequest.class), any(Container.class));
  assertEquals(0, app_0.getSchedulingOpportunities(priority));
  assertEquals(0, app_0.getTotalRequiredResources(priority));
  assertEquals(NodeType.NODE_LOCAL, assignment.getType());
  // New requests: host_1/rack_1 preference, 2 at ANY.
  app_0_requests_0.clear();
  app_0_requests_0.add(TestUtils.createResourceRequest(host_1, 1 * GB, 1, true, priority, recordFactory));
  app_0_requests_0.add(TestUtils.createResourceRequest(rack_1, 1 * GB, 1, true, priority, recordFactory));
  app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority, recordFactory));
  app_0.updateResourceRequests(app_0_requests_0);
  assertEquals(2, app_0.getTotalRequiredResources(priority));
  // node_3 is on rack_1 but is not a requested host.
  String host_3 = "127.0.0.4";
  FiCaSchedulerNode node_3 = TestUtils.getMockNode(host_3, rack_1, 0, 8 * GB);
  // With a node-locality delay of 1, the first rack-only offer is skipped...
  doReturn(1).when(a).getNodeLocalityDelay();
  assignment = a.assignContainers(clusterResource, node_3);
  assertEquals(1, app_0.getSchedulingOpportunities(priority));
  assertEquals(2, app_0.getTotalRequiredResources(priority));
  assertEquals(NodeType.NODE_LOCAL, assignment.getType());
  // ...and the second offer allocates RACK_LOCAL.
  assignment = a.assignContainers(clusterResource, node_3);
  verify(app_0).allocate(eq(NodeType.RACK_LOCAL), eq(node_3), any(Priority.class), any(ResourceRequest.class), any(Container.class));
  assertEquals(0, app_0.getSchedulingOpportunities(priority));
  assertEquals(1, app_0.getTotalRequiredResources(priority));
  assertEquals(NodeType.RACK_LOCAL, assignment.getType());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * App-attempt lifecycle metrics on queue B: failing the first attempt must
 * leave the application neither pending nor counted as failed, and finishing
 * a later attempt of the same application results in one submitted and one
 * completed app, including per-user metrics.
 */
@Test public void testAppAttemptMetrics() throws Exception {
  LeafQueue queue = stubLeafQueue((LeafQueue) queues.get(B));
  final String user_0 = "user_0";
  // Attempt 1 of application 0, driven through the scheduler event path.
  final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 1);
  AppAddedSchedulerEvent appAddedEvent = new AppAddedSchedulerEvent(appAttemptId_0.getApplicationId(), queue.getQueueName(), user_0);
  cs.handle(appAddedEvent);
  AppAttemptAddedSchedulerEvent attemptAddedEvent = new AppAttemptAddedSchedulerEvent(appAttemptId_0, false);
  cs.handle(attemptAddedEvent);
  // Fail the first attempt: the application is neither pending nor failed.
  AppAttemptRemovedSchedulerEvent attemptFailedEvent = new AppAttemptRemovedSchedulerEvent(appAttemptId_0, RMAppAttemptState.FAILED, false);
  cs.handle(attemptFailedEvent);
  assertEquals(0, queue.getMetrics().getAppsPending());
  assertEquals(0, queue.getMetrics().getAppsFailed());
  // Attempt 2 of the same application, submitted directly to the queue.
  final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(0, 2);
  FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, queue, null, rmContext);
  queue.submitApplicationAttempt(app_1, user_0);
  assertEquals(1, queue.getMetrics().getAppsSubmitted());
  assertEquals(1, queue.getMetrics().getAppsPending());
  // Finish the attempt and remove the application (both attempt IDs share
  // application id 0, so appAttemptId_0.getApplicationId() identifies it).
  AppAttemptRemovedSchedulerEvent attemptFinishedEvent = new AppAttemptRemovedSchedulerEvent(appAttemptId_0, RMAppAttemptState.FINISHED, false);
  cs.handle(attemptFinishedEvent);
  AppRemovedSchedulerEvent appRemovedEvent = new AppRemovedSchedulerEvent(appAttemptId_0.getApplicationId(), RMAppState.FINISHED);
  cs.handle(appRemovedEvent);
  assertEquals(1, queue.getMetrics().getAppsSubmitted());
  assertEquals(0, queue.getMetrics().getAppsPending());
  assertEquals(0, queue.getMetrics().getAppsFailed());
  assertEquals(1, queue.getMetrics().getAppsCompleted());
  QueueMetrics userMetrics = queue.getMetrics().getUserMetrics(user_0);
  assertEquals(1, userMetrics.getAppsSubmitted());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies per-priority scheduling for a single application: priority-1 (1GB)
 * requests are serviced before the priority-2 (2GB) request, and scheduling
 * opportunities (delay scheduling) are tracked independently per priority.
 * Uses a spied FiCaSchedulerApp so allocate() calls can be verified per
 * node/priority; Mockito verify() counts are cumulative across the test.
 */
@Test public void testApplicationPriorityScheduling() throws Exception {
// Single spied app submitted to queue A.
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
String user_0="user_0";
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
FiCaSchedulerApp app_0=spy(new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext));
a.submitApplicationAttempt(app_0,user_0);
// Three 8GB nodes, each on its own rack.
String host_0="127.0.0.1";
String rack_0="rack_0";
FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,rack_0,0,8 * GB);
String host_1="127.0.0.2";
String rack_1="rack_1";
FiCaSchedulerNode node_1=TestUtils.getMockNode(host_1,rack_1,0,8 * GB);
String host_2="127.0.0.3";
String rack_2="rack_2";
FiCaSchedulerNode node_2=TestUtils.getMockNode(host_2,rack_2,0,8 * GB);
final int numNodes=3;
Resource clusterResource=Resources.createResource(numNodes * (8 * GB),1);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
// Priority 1: two 1GB containers preferring host_0/host_1;
// Priority 2: one 2GB container preferring host_2.
List app_0_requests_0=new ArrayList();
Priority priority_1=TestUtils.createMockPriority(1);
app_0_requests_0.add(TestUtils.createResourceRequest(host_0,1 * GB,1,true,priority_1,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_0,1 * GB,1,true,priority_1,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(host_1,1 * GB,1,true,priority_1,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_1,1 * GB,1,true,priority_1,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority_1,recordFactory));
Priority priority_2=TestUtils.createMockPriority(2);
app_0_requests_0.add(TestUtils.createResourceRequest(host_2,2 * GB,1,true,priority_2,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_2,2 * GB,1,true,priority_2,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,2 * GB,1,true,priority_2,recordFactory));
app_0.updateResourceRequests(app_0_requests_0);
// Offers 1-2 on node_2: priority_1 is delayed (opportunities grow) and, while
// the higher priority is unsatisfied, priority_2 is not scheduled at all.
a.assignContainers(clusterResource,node_2);
verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),eq(priority_1),any(ResourceRequest.class),any(Container.class));
assertEquals(1,app_0.getSchedulingOpportunities(priority_1));
assertEquals(2,app_0.getTotalRequiredResources(priority_1));
verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),eq(priority_2),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority_2));
assertEquals(1,app_0.getTotalRequiredResources(priority_2));
a.assignContainers(clusterResource,node_2);
verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),eq(priority_1),any(ResourceRequest.class),any(Container.class));
assertEquals(2,app_0.getSchedulingOpportunities(priority_1));
assertEquals(2,app_0.getTotalRequiredResources(priority_1));
verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),eq(priority_2),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority_2));
assertEquals(1,app_0.getTotalRequiredResources(priority_2));
// Offer 3 on node_2: priority_1 falls back to OFF_SWITCH; priority_2 still waits.
a.assignContainers(clusterResource,node_2);
verify(app_0).allocate(eq(NodeType.OFF_SWITCH),eq(node_2),eq(priority_1),any(ResourceRequest.class),any(Container.class));
assertEquals(3,app_0.getSchedulingOpportunities(priority_1));
assertEquals(1,app_0.getTotalRequiredResources(priority_1));
verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),eq(priority_2),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority_2));
assertEquals(1,app_0.getTotalRequiredResources(priority_2));
// node_0 satisfies the last priority_1 request NODE_LOCAL and resets its
// opportunity count; priority_2 gets nothing on this node.
a.assignContainers(clusterResource,node_0);
verify(app_0).allocate(eq(NodeType.NODE_LOCAL),eq(node_0),eq(priority_1),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority_1));
assertEquals(0,app_0.getTotalRequiredResources(priority_1));
verify(app_0,never()).allocate(any(NodeType.class),eq(node_0),eq(priority_2),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority_2));
assertEquals(1,app_0.getTotalRequiredResources(priority_2));
// With priority_1 fully satisfied, node_1 services priority_2 OFF_SWITCH.
a.assignContainers(clusterResource,node_1);
verify(app_0,never()).allocate(any(NodeType.class),eq(node_1),eq(priority_1),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority_1));
assertEquals(0,app_0.getTotalRequiredResources(priority_1));
verify(app_0).allocate(eq(NodeType.OFF_SWITCH),eq(node_1),eq(priority_2),any(ResourceRequest.class),any(Container.class));
assertEquals(1,app_0.getSchedulingOpportunities(priority_2));
assertEquals(0,app_0.getTotalRequiredResources(priority_2));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that a reservation made on one node can be "exchanged": app_1's
 * reserved 4GB on node_0 is eventually allocated on node_1 instead, and the
 * now-excess reservation on node_0 is released (surfaced via
 * CSAssignment.getExcessReservation()). Reservations count toward the queue's
 * used resources but not toward the node's used resources.
 */
@Test public void testReservationExchange() throws Exception {
// Queue A with no effective caps so a single user can consume the cluster.
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
a.setMaxCapacity(1.0f);
a.setUserLimitFactor(10);
// Two users with one app each.
final String user_0="user_0";
final String user_1="user_1";
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext);
a.submitApplicationAttempt(app_0,user_0);
final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0);
FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_1,a,mock(ActiveUsersManager.class),rmContext);
a.submitApplicationAttempt(app_1,user_1);
// Two 4GB nodes (cluster is sized for 3).
String host_0="127.0.0.1";
FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,4 * GB);
String host_1="127.0.0.2";
FiCaSchedulerNode node_1=TestUtils.getMockNode(host_1,DEFAULT_RACK,0,4 * GB);
final int numNodes=3;
Resource clusterResource=Resources.createResource(numNodes * (4 * GB),numNodes * 16);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(4 * GB,16));
when(a.getMaximumAllocation()).thenReturn(Resources.createResource(4 * GB,16));
when(a.getMinimumAllocationFactor()).thenReturn(0.25f);
// app_0 wants two 1GB containers; app_1 wants one node-filling 4GB container.
Priority priority=TestUtils.createMockPriority(1);
app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority,recordFactory)));
app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,4 * GB,1,true,priority,recordFactory)));
// Two offers of node_0 give app_0 its 2 x 1GB.
a.assignContainers(clusterResource,node_0);
assertEquals(1 * GB,a.getUsedResources().getMemory());
assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
a.assignContainers(clusterResource,node_0);
assertEquals(2 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
// node_0 can no longer fit 4GB, so app_1 reserves it: queue usage includes
// the 4GB reservation (2 allocated + 4 reserved) but node usage does not.
a.assignContainers(clusterResource,node_0);
assertEquals(6 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(2 * GB,node_0.getUsedResource().getMemory());
// Kill one of app_0's containers; the next offer re-reserves for app_1
// (still not enough free space on node_0).
RMContainer rmContainer=app_0.getLiveContainers().iterator().next();
a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
a.assignContainers(clusterResource,node_0);
assertEquals(5 * GB,a.getUsedResources().getMemory());
assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(1 * GB,node_0.getUsedResource().getMemory());
assertEquals(1,app_1.getReReservations(priority));
a.assignContainers(clusterResource,node_0);
assertEquals(5 * GB,a.getUsedResources().getMemory());
assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(1 * GB,node_0.getUsedResource().getMemory());
assertEquals(2,app_1.getReReservations(priority));
// Empty node_1 satisfies app_1's 4GB, while the node_0 reservation remains
// (queue usage = 1 allocated + 4 allocated + 4 still reserved = 9GB).
a.assignContainers(clusterResource,node_1);
assertEquals(9 * GB,a.getUsedResources().getMemory());
assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(4 * GB,node_1.getUsedResource().getMemory());
assertEquals(2,app_1.getReReservations(priority));
// Kill app_0's last container; the next node_0 offer releases the now-excess
// reservation, which is reported on the returned CSAssignment.
rmContainer=app_0.getLiveContainers().iterator().next();
a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
CSAssignment assignment=a.assignContainers(clusterResource,node_0);
assertEquals(8 * GB,a.getUsedResources().getMemory());
assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(0 * GB,node_0.getUsedResource().getMemory());
assertEquals(4 * GB,assignment.getExcessReservation().getContainer().getResource().getMemory());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies host-level scheduling constraints: when an app's outstanding
 * request set names only specific hosts on a rack, a node on a different rack
 * must not receive an allocation, and once the request is renewed the named
 * host on the other rack can be allocated NODE_LOCAL.
 *
 * Fix: use typed List&lt;ResourceRequest&gt; instead of raw List/ArrayList
 * (raw types compile only with unchecked warnings).
 */
@Test public void testSchedulingConstraints() throws Exception {
  LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
  String user_0 = "user_0";
  final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
  // Spy so allocate() invocations can be verified per node.
  FiCaSchedulerApp app_0 = spy(new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), rmContext));
  a.submitApplicationAttempt(app_0, user_0);
  // Two hosts on rack_0, one host on rack_1, each node 8GB.
  String host_0_0 = "127.0.0.1";
  String rack_0 = "rack_0";
  FiCaSchedulerNode node_0_0 = TestUtils.getMockNode(host_0_0, rack_0, 0, 8 * GB);
  String host_0_1 = "127.0.0.2";
  FiCaSchedulerNode node_0_1 = TestUtils.getMockNode(host_0_1, rack_0, 0, 8 * GB);
  String host_1_0 = "127.0.0.3";
  String rack_1 = "rack_1";
  FiCaSchedulerNode node_1_0 = TestUtils.getMockNode(host_1_0, rack_1, 0, 8 * GB);
  final int numNodes = 3;
  Resource clusterResource = Resources.createResource(numNodes * (8 * GB), numNodes * 16);
  when(csContext.getNumClusterNodes()).thenReturn(numNodes);
  Priority priority = TestUtils.createMockPriority(1);
  // Locality preferences for all three hosts and both racks...
  List<ResourceRequest> app_0_requests_0 = new ArrayList<ResourceRequest>();
  app_0_requests_0.add(TestUtils.createResourceRequest(host_0_0, 1 * GB, 1, true, priority, recordFactory));
  app_0_requests_0.add(TestUtils.createResourceRequest(host_0_1, 1 * GB, 1, true, priority, recordFactory));
  app_0_requests_0.add(TestUtils.createResourceRequest(rack_0, 1 * GB, 1, true, priority, recordFactory));
  app_0_requests_0.add(TestUtils.createResourceRequest(host_1_0, 1 * GB, 1, true, priority, recordFactory));
  app_0_requests_0.add(TestUtils.createResourceRequest(rack_1, 1 * GB, 1, true, priority, recordFactory));
  app_0.updateResourceRequests(app_0_requests_0);
  // ...but only one container outstanding at ANY.
  app_0_requests_0.clear();
  app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory));
  app_0.updateResourceRequests(app_0_requests_0);
  // The single container goes NODE_LOCAL on node_0_0.
  a.assignContainers(clusterResource, node_0_0);
  verify(app_0).allocate(eq(NodeType.NODE_LOCAL), eq(node_0_0), any(Priority.class), any(ResourceRequest.class), any(Container.class));
  assertEquals(0, app_0.getSchedulingOpportunities(priority));
  assertEquals(0, app_0.getTotalRequiredResources(priority));
  // Nothing outstanding, so node_1_0 must get nothing.
  a.assignContainers(clusterResource, node_1_0);
  verify(app_0, never()).allocate(any(NodeType.class), eq(node_1_0), any(Priority.class), any(ResourceRequest.class), any(Container.class));
  assertEquals(0, app_0.getSchedulingOpportunities(priority));
  assertEquals(0, app_0.getTotalRequiredResources(priority));
  // Renew one container at ANY; node_0_1 (rack_0) does not satisfy it...
  app_0_requests_0.clear();
  app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory));
  app_0.updateResourceRequests(app_0_requests_0);
  a.assignContainers(clusterResource, node_0_1);
  verify(app_0, never()).allocate(any(NodeType.class), eq(node_1_0), any(Priority.class), any(ResourceRequest.class), any(Container.class));
  assertEquals(1, app_0.getSchedulingOpportunities(priority));
  assertEquals(1, app_0.getTotalRequiredResources(priority));
  // ...but node_1_0, a named host, allocates NODE_LOCAL.
  a.assignContainers(clusterResource, node_1_0);
  verify(app_0).allocate(eq(NodeType.NODE_LOCAL), eq(node_1_0), any(Priority.class), any(ResourceRequest.class), any(Container.class));
  assertEquals(0, app_0.getSchedulingOpportunities(priority));
  assertEquals(0, app_0.getTotalRequiredResources(priority));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies container reservation on a single 4GB node and the matching queue
 * metrics: app_1's 4GB request is reserved while app_0's small containers
 * occupy the node, and the reservation converts to an allocation as app_0's
 * containers are killed. Reservations count toward queue used resources and
 * getReservedMB(), but not toward node used resources or getAllocatedMB().
 */
@Test public void testReservation() throws Exception {
// Queue A, uncapped; two users with one app each.
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
a.setMaxCapacity(1.0f);
final String user_0="user_0";
final String user_1="user_1";
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext);
a.submitApplicationAttempt(app_0,user_0);
final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0);
FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_1,a,mock(ActiveUsersManager.class),rmContext);
a.submitApplicationAttempt(app_1,user_1);
// One 4GB node; the cluster is nominally 2 nodes.
String host_0="127.0.0.1";
FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,4 * GB);
final int numNodes=2;
Resource clusterResource=Resources.createResource(numNodes * (4 * GB),numNodes * 16);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
// app_0 wants two 1GB containers; app_1 wants one node-filling 4GB container.
Priority priority=TestUtils.createMockPriority(1);
app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority,recordFactory)));
app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,4 * GB,1,true,priority,recordFactory)));
// Two offers give app_0 its 2 x 1GB; no reservation yet.
a.assignContainers(clusterResource,node_0);
assertEquals(1 * GB,a.getUsedResources().getMemory());
assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(1 * GB,a.getMetrics().getAllocatedMB());
assertEquals(0 * GB,a.getMetrics().getAvailableMB());
a.assignContainers(clusterResource,node_0);
assertEquals(2 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(2 * GB,a.getMetrics().getAllocatedMB());
// The 4GB request no longer fits, so app_1 reserves node_0: queue usage
// includes the reservation (2 allocated + 4 reserved); node usage does not.
a.assignContainers(clusterResource,node_0);
assertEquals(6 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(2 * GB,node_0.getUsedResource().getMemory());
assertEquals(4 * GB,a.getMetrics().getReservedMB());
assertEquals(2 * GB,a.getMetrics().getAllocatedMB());
// Kill one app_0 container: still not enough room, reservation persists.
RMContainer rmContainer=app_0.getLiveContainers().iterator().next();
a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
a.assignContainers(clusterResource,node_0);
assertEquals(5 * GB,a.getUsedResources().getMemory());
assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(1 * GB,node_0.getUsedResource().getMemory());
assertEquals(4 * GB,a.getMetrics().getReservedMB());
assertEquals(1 * GB,a.getMetrics().getAllocatedMB());
// Kill the last app_0 container: the reservation converts to a real 4GB
// allocation for app_1 and the reserved metric drops to zero.
rmContainer=app_0.getLiveContainers().iterator().next();
a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
a.assignContainers(clusterResource,node_0);
assertEquals(4 * GB,a.getUsedResources().getMemory());
assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(4 * GB,node_0.getUsedResource().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(4 * GB,a.getMetrics().getAllocatedMB());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Queue E initially holds 2 active + 1 pending application because the
 * AM-resource-percent limit caps concurrently active apps; after doubling
 * maximum-am-resource-percent and reinitializing the root queue, the pending
 * application must become active.
 *
 * Fix: use typed Map&lt;String, CSQueue&gt; instead of a raw HashMap (raw
 * types compile only with unchecked warnings).
 */
@Test(timeout=30000) public void testActivateApplicationAfterQueueRefresh() throws Exception {
  LeafQueue e = stubLeafQueue((LeafQueue) queues.get(E));
  final String user_e = "user_e";
  // Submit three attempts for the same user; the third exceeds the AM limit
  // and stays pending.
  final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
  FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_e, e, mock(ActiveUsersManager.class), rmContext);
  e.submitApplicationAttempt(app_0, user_e);
  final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
  FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_e, e, mock(ActiveUsersManager.class), rmContext);
  e.submitApplicationAttempt(app_1, user_e);
  final ApplicationAttemptId appAttemptId_2 = TestUtils.getMockApplicationAttemptId(2, 0);
  FiCaSchedulerApp app_2 = new FiCaSchedulerApp(appAttemptId_2, user_e, e, mock(ActiveUsersManager.class), rmContext);
  e.submitApplicationAttempt(app_2, user_e);
  assertEquals(2, e.activeApplications.size());
  assertEquals(1, e.pendingApplications.size());
  // Double the AM-resource percent and rebuild/reinitialize the hierarchy.
  csConf.setDouble(CapacitySchedulerConfiguration.MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT, CapacitySchedulerConfiguration.DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT * 2);
  Map<String, CSQueue> newQueues = new HashMap<String, CSQueue>();
  CSQueue newRoot = CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, newQueues, queues, TestUtils.spyHook);
  queues = newQueues;
  root.reinitialize(newRoot, cs.getClusterResource());
  // The refresh must activate the previously pending application.
  assertEquals(3, e.activeApplications.size());
  assertEquals(0, e.pendingApplications.size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies ACL inheritance across a multi-level queue hierarchy: root denies
 * both SUBMIT_APPLICATIONS and ADMINISTER_QUEUE (" " = no users or groups),
 * queue C grants ADMINISTER_QUEUE to everyone ("*"), and C11 additionally
 * grants SUBMIT_APPLICATIONS; descendants inherit their ancestors' grants.
 *
 * Fix: use typed Map&lt;String, CSQueue&gt; instead of a raw HashMap (raw
 * types compile only with unchecked warnings).
 */
@Test public void testQueueAcl() throws Exception {
  setupMultiLevelQueues(csConf);
  // A single space means "no users, no groups" -> denied at the root.
  csConf.setAcl(CapacitySchedulerConfiguration.ROOT, QueueACL.SUBMIT_APPLICATIONS, " ");
  csConf.setAcl(CapacitySchedulerConfiguration.ROOT, QueueACL.ADMINISTER_QUEUE, " ");
  final String Q_C = CapacitySchedulerConfiguration.ROOT + "." + C;
  csConf.setAcl(Q_C, QueueACL.ADMINISTER_QUEUE, "*");
  final String Q_C11 = Q_C + "." + C1 + "." + C11;
  csConf.setAcl(Q_C11, QueueACL.SUBMIT_APPLICATIONS, "*");
  // Local typed map intentionally shadows the test-class 'queues' field.
  Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
  CSQueue root = CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, TestUtils.spyHook);
  UserGroupInformation user = UserGroupInformation.getCurrentUser();
  ParentQueue c = (ParentQueue) queues.get(C);
  ParentQueue c1 = (ParentQueue) queues.get(C1);
  ParentQueue c11 = (ParentQueue) queues.get(C11);
  ParentQueue c111 = (ParentQueue) queues.get(C111);
  // Root denies everything.
  assertFalse(root.hasAccess(QueueACL.ADMINISTER_QUEUE, user));
  // NOTE(review): left raw — the element type (presumably QueueUserACLInfo)
  // is not referenced anywhere in this method; confirm before typing it.
  List aclInfos = root.getQueueUserAclInfo(user);
  assertFalse(hasQueueACL(aclInfos, QueueACL.ADMINISTER_QUEUE, "root"));
  assertFalse(root.hasAccess(QueueACL.SUBMIT_APPLICATIONS, user));
  assertFalse(hasQueueACL(aclInfos, QueueACL.SUBMIT_APPLICATIONS, "root"));
  // C grants ADMINISTER_QUEUE only; SUBMIT stays denied.
  assertTrue(c.hasAccess(QueueACL.ADMINISTER_QUEUE, user));
  assertTrue(hasQueueACL(aclInfos, QueueACL.ADMINISTER_QUEUE, "c"));
  assertFalse(c.hasAccess(QueueACL.SUBMIT_APPLICATIONS, user));
  assertFalse(hasQueueACL(aclInfos, QueueACL.SUBMIT_APPLICATIONS, "c"));
  // C1 inherits C's ADMINISTER_QUEUE grant.
  assertTrue(c1.hasAccess(QueueACL.ADMINISTER_QUEUE, user));
  assertTrue(hasQueueACL(aclInfos, QueueACL.ADMINISTER_QUEUE, "c1"));
  assertFalse(c1.hasAccess(QueueACL.SUBMIT_APPLICATIONS, user));
  assertFalse(hasQueueACL(aclInfos, QueueACL.SUBMIT_APPLICATIONS, "c1"));
  // C11 adds SUBMIT_APPLICATIONS on top of the inherited ADMINISTER_QUEUE.
  assertTrue(c11.hasAccess(QueueACL.ADMINISTER_QUEUE, user));
  assertTrue(hasQueueACL(aclInfos, QueueACL.ADMINISTER_QUEUE, "c11"));
  assertTrue(c11.hasAccess(QueueACL.SUBMIT_APPLICATIONS, user));
  assertTrue(hasQueueACL(aclInfos, QueueACL.SUBMIT_APPLICATIONS, "c11"));
  // C111 inherits both grants.
  assertTrue(c111.hasAccess(QueueACL.ADMINISTER_QUEUE, user));
  assertTrue(hasQueueACL(aclInfos, QueueACL.ADMINISTER_QUEUE, "c111"));
  assertTrue(c111.hasAccess(QueueACL.SUBMIT_APPLICATIONS, user));
  assertTrue(hasQueueACL(aclInfos, QueueACL.SUBMIT_APPLICATIONS, "c111"));
  reset(c);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Basic Dominant Resource Fairness check on the FairScheduler: with one node
 * of 8192MB / 5 vcores, a memory-dominant app (2GB x 1 vcore) and a
 * cpu-dominant app (1GB x 2 vcores) in the same DRF queue, containers are
 * handed out alternating by dominant share: app1, app2, then app1 again.
 */
@Test public void testBasicDRFAssignment() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());
  // Single node: 8192MB memory, 5 vcores.
  RMNode rmNode = MockNodes.newNodeInfo(1, BuilderUtils.newResource(8192, 5));
  scheduler.handle(new NodeAddedSchedulerEvent(rmNode));
  // memHeavyApp asks 2 x (2048MB, 1 vcore); cpuHeavyApp asks 2 x (1024MB, 2 vcores).
  ApplicationAttemptId memHeavyAttemptId = createSchedulingRequest(2048, 1, "queue1", "user1", 2);
  FSAppAttempt memHeavyApp = scheduler.getSchedulerApp(memHeavyAttemptId);
  ApplicationAttemptId cpuHeavyAttemptId = createSchedulingRequest(1024, 2, "queue1", "user1", 2);
  FSAppAttempt cpuHeavyApp = scheduler.getSchedulerApp(cpuHeavyAttemptId);
  // Switch queue1 to the DRF policy before scheduling.
  DominantResourceFairnessPolicy policy = new DominantResourceFairnessPolicy();
  policy.initialize(scheduler.getClusterResource());
  scheduler.getQueueManager().getQueue("queue1").setPolicy(policy);
  scheduler.update();
  NodeUpdateSchedulerEvent heartbeat = new NodeUpdateSchedulerEvent(rmNode);
  // Heartbeat 1: memory-heavy app is scheduled first.
  scheduler.handle(heartbeat);
  Assert.assertEquals(1, memHeavyApp.getLiveContainers().size());
  Assert.assertEquals(0, cpuHeavyApp.getLiveContainers().size());
  // Heartbeat 2: cpu-heavy app catches up on dominant share.
  scheduler.handle(heartbeat);
  Assert.assertEquals(1, memHeavyApp.getLiveContainers().size());
  Assert.assertEquals(1, cpuHeavyApp.getLiveContainers().size());
  // Heartbeat 3: back to the memory-heavy app.
  scheduler.handle(heartbeat);
  Assert.assertEquals(2, memHeavyApp.getLiveContainers().size());
  Assert.assertEquals(1, cpuHeavyApp.getLiveContainers().size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * After a container is preempted (warn, then kill once WAIT_TIME_BEFORE_KILL
 * elapses), the app's original resource requests (node-local, rack-local and
 * off-switch, one container each) must be restored so the container can be
 * re-allocated on the next node heartbeat.
 *
 * Fixes: type the raw request lists (the raw for-each over {@code requests}
 * would not compile — iterating a raw List yields Object, not
 * ResourceRequest), and use assertEquals for a clearer failure message on the
 * final container count.
 */
@Test(timeout=5000) public void testRecoverRequestAfterPreemption() throws Exception {
  conf.setLong(FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL, 10);
  MockClock clock = new MockClock();
  scheduler.setClock(clock);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());
  Priority priority = Priority.newInstance(20);
  String host = "127.0.0.1";
  int GB = 1024;
  // One 16GB / 4-vcore node.
  RMNode node = MockNodes.newNodeInfo(1, Resources.createResource(16 * 1024, 4), 0, host);
  NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
  scheduler.handle(nodeEvent);
  // One 1GB container requested at node, rack and ANY scope.
  List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
  ResourceRequest nodeLocalRequest = createResourceRequest(GB, 1, host, priority.getPriority(), 1, true);
  ResourceRequest rackLocalRequest = createResourceRequest(GB, 1, node.getRackName(), priority.getPriority(), 1, true);
  ResourceRequest offRackRequest = createResourceRequest(GB, 1, ResourceRequest.ANY, priority.getPriority(), 1, true);
  ask.add(nodeLocalRequest);
  ask.add(rackLocalRequest);
  ask.add(offRackRequest);
  ApplicationAttemptId appAttemptId = createSchedulingRequest("queueA", "user1", ask);
  scheduler.update();
  // First heartbeat allocates the container; the satisfied node-local request
  // is consumed (no longer present on the app).
  NodeUpdateSchedulerEvent nodeUpdate = new NodeUpdateSchedulerEvent(node);
  scheduler.handle(nodeUpdate);
  assertEquals(1, scheduler.getSchedulerApp(appAttemptId).getLiveContainers().size());
  FSAppAttempt app = scheduler.getSchedulerApp(appAttemptId);
  Assert.assertNull(app.getResourceRequest(priority, host));
  // Warn, advance past WAIT_TIME_BEFORE_KILL, then kill the container.
  ContainerId containerId1 = ContainerId.newInstance(appAttemptId, 1);
  RMContainer rmContainer = app.getRMContainer(containerId1);
  scheduler.warnOrKillContainer(rmContainer);
  clock.tick(5);
  scheduler.warnOrKillContainer(rmContainer);
  // All three original requests must be recovered onto the app.
  List<ResourceRequest> requests = rmContainer.getResourceRequests();
  Assert.assertEquals(3, requests.size());
  for (ResourceRequest request : requests) {
    Assert.assertEquals(1, app.getResourceRequest(priority, request.getResourceName()).getNumContainers());
  }
  // Next heartbeat re-allocates; the app receives exactly one container.
  scheduler.update();
  scheduler.handle(nodeUpdate);
  List containers = scheduler.allocate(appAttemptId, Collections.emptyList(), Collections.emptyList(), null, null).getContainers();
  Assert.assertEquals(1, containers.size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Moving an application out of a queue whose maxRunningApps is exhausted must
 * promote it from non-runnable to runnable in the destination queue, and the
 * runnable-app counters on the destination and root queues must reflect it.
 */
@Test public void testMoveMakesAppRunnable() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());
  QueueManager queueManager = scheduler.getQueueManager();
  FSLeafQueue sourceQueue = queueManager.getLeafQueue("queue1", true);
  FSLeafQueue destQueue = queueManager.getLeafQueue("queue2", true);
  // queue1 admits zero running apps, so the new app starts non-runnable.
  scheduler.getAllocationConfiguration().queueMaxApps.put("root.queue1", 0);
  ApplicationAttemptId attemptId = createSchedulingRequest(1024, 1, "queue1", "user1", 3);
  FSAppAttempt attempt = scheduler.getSchedulerApp(attemptId);
  assertTrue(sourceQueue.getNonRunnableAppSchedulables().contains(attempt));
  // Move to queue2, which has capacity for it.
  scheduler.moveApplication(attemptId.getApplicationId(), "queue2");
  // The app is gone from queue1 and runnable (not non-runnable) in queue2.
  assertFalse(sourceQueue.getNonRunnableAppSchedulables().contains(attempt));
  assertFalse(destQueue.getNonRunnableAppSchedulables().contains(attempt));
  assertTrue(destQueue.getRunnableAppSchedulables().contains(attempt));
  assertEquals(1, destQueue.getNumRunnableApps());
  assertEquals(1, queueManager.getRootQueue().getNumRunnableApps());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * An app that strictly asks for node1 gets nothing from node2 heartbeats;
 * after the app cancels the strict ask (zeroing the node/rack requests and
 * relaxing ANY), a node2 heartbeat allocates the container.
 *
 * Fix: the update list and release list used raw types; parameterized them
 * ({@code List<ResourceRequest>}, {@code ArrayList<ContainerId>}) so the
 * allocate() call is type-checked.
 */
@Test
public void testCancelStrictLocality() throws IOException {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(1024), 2, "127.0.0.2");
  NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2);
  scheduler.handle(nodeEvent2);

  ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1", "user1", 0);
  ResourceRequest nodeRequest = createResourceRequest(1024, node1.getHostName(), 1, 1, true);
  ResourceRequest rackRequest = createResourceRequest(1024, "rack1", 1, 1, false);
  ResourceRequest anyRequest = createResourceRequest(1024, ResourceRequest.ANY, 1, 1, false);
  createSchedulingRequestExistingApplication(nodeRequest, attId1);
  createSchedulingRequestExistingApplication(rackRequest, attId1);
  createSchedulingRequestExistingApplication(anyRequest, attId1);
  scheduler.update();

  NodeUpdateSchedulerEvent node2UpdateEvent = new NodeUpdateSchedulerEvent(node2);
  FSAppAttempt app = scheduler.getSchedulerApp(attId1);
  // node2 heartbeats never satisfy the node1-targeted ask.
  for (int i = 0; i < 10; i++) {
    scheduler.handle(node2UpdateEvent);
    assertEquals(0, app.getLiveContainers().size());
  }
  // Cancel strict locality: zero out the node and rack asks, keep one ANY.
  List<ResourceRequest> update = Arrays.asList(
      createResourceRequest(1024, node1.getHostName(), 1, 0, true),
      createResourceRequest(1024, "rack1", 1, 0, true),
      createResourceRequest(1024, ResourceRequest.ANY, 1, 1, true));
  scheduler.allocate(attId1, update, new ArrayList<ContainerId>(), null, null);
  scheduler.handle(node2UpdateEvent);
  assertEquals(1, app.getLiveContainers().size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify that queue1's {@code maxAMShare} caps the aggregate resources used
 * by application masters in the queue: per the assertions below the cap
 * admits 2048 MB of AM containers but not 3072 MB, and waiting AMs start
 * only once a running AM finishes. Removing an AM that never started frees
 * nothing.
 * @throws Exception
 */
@Test
public void testQueueMaxAMShare() throws Exception {
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
  PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
  // NOTE(review): the XML literals were garbled in this copy of the file
  // (only "0.2" survived); reconstructed so queue1 caps its AM share at 0.2.
  out.println("<?xml version=\"1.0\"?>");
  out.println("<allocations>");
  out.println("<queue name=\"queue1\">");
  out.println("<maxAMShare>0.2</maxAMShare>");
  out.println("</queue>");
  out.println("</allocations>");
  out.close();

  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  // Single 20480 MB / 20 vcore node.
  RMNode node = MockNodes.newNodeInfo(1, Resources.createResource(20480, 20), 0, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
  scheduler.handle(nodeEvent);
  scheduler.update();

  FSLeafQueue queue1 = scheduler.getQueueManager().getLeafQueue("queue1", true);
  assertEquals("Queue queue1's fair share should be 0", 0,
      queue1.getFairShare().getMemory());

  createSchedulingRequest(1 * 1024, "root.default", "user1");
  scheduler.update();
  scheduler.handle(updateEvent);

  Resource amResource1 = Resource.newInstance(1024, 1);
  Resource amResource2 = Resource.newInstance(2048, 2);
  Resource amResource3 = Resource.newInstance(1860, 2);
  int amPriority = RMAppAttemptImpl.AM_CONTAINER_PRIORITY.getPriority();

  // App 1: a 1024 MB AM starts.
  ApplicationAttemptId attId1 = createAppAttemptId(1, 1);
  createApplicationWithAMResource(attId1, "queue1", "user1", amResource1);
  createSchedulingRequestExistingApplication(1024, 1, amPriority, attId1);
  FSAppAttempt app1 = scheduler.getSchedulerApp(attId1);
  scheduler.update();
  scheduler.handle(updateEvent);
  assertEquals("Application1's AM requests 1024 MB memory", 1024,
      app1.getAMResource().getMemory());
  assertEquals("Application1's AM should be running", 1,
      app1.getLiveContainers().size());
  assertEquals("Queue1's AM resource usage should be 1024 MB memory", 1024,
      queue1.getAmResourceUsage().getMemory());

  // App 2: a second 1024 MB AM still fits.
  ApplicationAttemptId attId2 = createAppAttemptId(2, 1);
  createApplicationWithAMResource(attId2, "queue1", "user1", amResource1);
  createSchedulingRequestExistingApplication(1024, 1, amPriority, attId2);
  FSAppAttempt app2 = scheduler.getSchedulerApp(attId2);
  scheduler.update();
  scheduler.handle(updateEvent);
  assertEquals("Application2's AM requests 1024 MB memory", 1024,
      app2.getAMResource().getMemory());
  assertEquals("Application2's AM should be running", 1,
      app2.getLiveContainers().size());
  assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048,
      queue1.getAmResourceUsage().getMemory());

  // App 3: a third AM would exceed the cap, so it waits.
  ApplicationAttemptId attId3 = createAppAttemptId(3, 1);
  createApplicationWithAMResource(attId3, "queue1", "user1", amResource1);
  createSchedulingRequestExistingApplication(1024, 1, amPriority, attId3);
  FSAppAttempt app3 = scheduler.getSchedulerApp(attId3);
  scheduler.update();
  scheduler.handle(updateEvent);
  assertEquals("Application3's AM requests 1024 MB memory", 1024,
      app3.getAMResource().getMemory());
  assertEquals("Application3's AM should not be running", 0,
      app3.getLiveContainers().size());
  assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048,
      queue1.getAmResourceUsage().getMemory());

  // A non-AM container for app1 is not limited by the AM share.
  createSchedulingRequestExistingApplication(1024, 1, attId1);
  scheduler.update();
  scheduler.handle(updateEvent);
  assertEquals("Application1 should have two running containers", 2,
      app1.getLiveContainers().size());
  assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048,
      queue1.getAmResourceUsage().getMemory());

  // Finishing app1 frees AM share; app3's AM can now start.
  AppAttemptRemovedSchedulerEvent appRemovedEvent1 =
      new AppAttemptRemovedSchedulerEvent(attId1, RMAppAttemptState.FINISHED, false);
  scheduler.update();
  scheduler.handle(appRemovedEvent1);
  scheduler.handle(updateEvent);
  assertEquals("Application1's AM should be finished", 0,
      app1.getLiveContainers().size());
  assertEquals("Application3's AM should be running", 1,
      app3.getLiveContainers().size());
  assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048,
      queue1.getAmResourceUsage().getMemory());

  // Apps 4 and 5: 2048 MB AMs exceed the remaining share and wait.
  ApplicationAttemptId attId4 = createAppAttemptId(4, 1);
  createApplicationWithAMResource(attId4, "queue1", "user1", amResource2);
  createSchedulingRequestExistingApplication(2048, 2, amPriority, attId4);
  FSAppAttempt app4 = scheduler.getSchedulerApp(attId4);
  scheduler.update();
  scheduler.handle(updateEvent);
  assertEquals("Application4's AM requests 2048 MB memory", 2048,
      app4.getAMResource().getMemory());
  assertEquals("Application4's AM should not be running", 0,
      app4.getLiveContainers().size());
  assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048,
      queue1.getAmResourceUsage().getMemory());

  ApplicationAttemptId attId5 = createAppAttemptId(5, 1);
  createApplicationWithAMResource(attId5, "queue1", "user1", amResource2);
  createSchedulingRequestExistingApplication(2048, 2, amPriority, attId5);
  FSAppAttempt app5 = scheduler.getSchedulerApp(attId5);
  scheduler.update();
  scheduler.handle(updateEvent);
  assertEquals("Application5's AM requests 2048 MB memory", 2048,
      app5.getAMResource().getMemory());
  assertEquals("Application5's AM should not be running", 0,
      app5.getLiveContainers().size());
  assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048,
      queue1.getAmResourceUsage().getMemory());

  // Killing app4 (never started) frees nothing; app5 still waits.
  AppAttemptRemovedSchedulerEvent appRemovedEvent4 =
      new AppAttemptRemovedSchedulerEvent(attId4, RMAppAttemptState.KILLED, false);
  scheduler.handle(appRemovedEvent4);
  scheduler.update();
  scheduler.handle(updateEvent);
  assertEquals("Application5's AM should not be running", 0,
      app5.getLiveContainers().size());
  assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048,
      queue1.getAmResourceUsage().getMemory());

  // Finishing apps 2 and 3 frees 2048 MB; app5's AM starts.
  AppAttemptRemovedSchedulerEvent appRemovedEvent2 =
      new AppAttemptRemovedSchedulerEvent(attId2, RMAppAttemptState.FINISHED, false);
  AppAttemptRemovedSchedulerEvent appRemovedEvent3 =
      new AppAttemptRemovedSchedulerEvent(attId3, RMAppAttemptState.FINISHED, false);
  scheduler.handle(appRemovedEvent2);
  scheduler.handle(appRemovedEvent3);
  scheduler.update();
  scheduler.handle(updateEvent);
  assertEquals("Application2's AM should be finished", 0,
      app2.getLiveContainers().size());
  assertEquals("Application3's AM should be finished", 0,
      app3.getLiveContainers().size());
  assertEquals("Application5's AM should be running", 1,
      app5.getLiveContainers().size());
  assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048,
      queue1.getAmResourceUsage().getMemory());

  // App 6: the 1860 MB ask is accounted as 2048 MB (the assert below pins
  // this — presumably allocation-increment normalization), so it waits.
  ApplicationAttemptId attId6 = createAppAttemptId(6, 1);
  createApplicationWithAMResource(attId6, "queue1", "user1", amResource3);
  createSchedulingRequestExistingApplication(1860, 2, amPriority, attId6);
  FSAppAttempt app6 = scheduler.getSchedulerApp(attId6);
  scheduler.update();
  scheduler.handle(updateEvent);
  assertEquals("Application6's AM should not be running", 0,
      app6.getLiveContainers().size());
  assertEquals("Application6's AM requests 2048 MB memory", 2048,
      app6.getAMResource().getMemory());
  assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048,
      queue1.getAmResourceUsage().getMemory());

  // With every app gone, AM usage returns to zero.
  AppAttemptRemovedSchedulerEvent appRemovedEvent5 =
      new AppAttemptRemovedSchedulerEvent(attId5, RMAppAttemptState.FINISHED, false);
  AppAttemptRemovedSchedulerEvent appRemovedEvent6 =
      new AppAttemptRemovedSchedulerEvent(attId6, RMAppAttemptState.FINISHED, false);
  scheduler.handle(appRemovedEvent5);
  scheduler.handle(appRemovedEvent6);
  scheduler.update();
  assertEquals("Queue1's AM resource usage should be 0", 0,
      queue1.getAmResourceUsage().getMemory());
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier EqualityVerifier
/**
 * Test to verify the behavior of {@link FSQueue#assignContainer(FSSchedulerNode)}.
 * Create two queues under root (fifoQueue and fairParent), and two queues
 * under fairParent (fairChild1 and fairChild2). Submit two apps to the
 * fifoQueue and one each to the fairChild* queues, all apps requiring 4
 * containers each of the total 16 container capacity.
 * Assert the number of containers for each app after 4, 8, 12 and 16 updates.
 * @throws Exception
 */
@Test(timeout=5000) public void testAssignContainer() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
final String user="user1";
final String fifoQueue="fifo";
final String fairParent="fairParent";
final String fairChild1=fairParent + ".fairChild1";
final String fairChild2=fairParent + ".fairChild2";
// Two 8192 MB / 8 vcore nodes => 16 x 1024 MB containers of total capacity.
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(8192,8),1,"127.0.0.1");
RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(8192,8),2,"127.0.0.2");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node2);
scheduler.handle(nodeEvent1);
scheduler.handle(nodeEvent2);
// Four apps, 4 x 1024 MB containers each: apps 1 and 4 share the fifo
// queue, apps 2 and 3 go to the two fair child queues.
ApplicationAttemptId attId1=createSchedulingRequest(1024,fifoQueue,user,4);
ApplicationAttemptId attId2=createSchedulingRequest(1024,fairChild1,user,4);
ApplicationAttemptId attId3=createSchedulingRequest(1024,fairChild2,user,4);
ApplicationAttemptId attId4=createSchedulingRequest(1024,fifoQueue,user,4);
FSAppAttempt app1=scheduler.getSchedulerApp(attId1);
FSAppAttempt app2=scheduler.getSchedulerApp(attId2);
FSAppAttempt app3=scheduler.getSchedulerApp(attId3);
FSAppAttempt app4=scheduler.getSchedulerApp(attId4);
// The fifo queue serves its apps strictly in order: app1 fully, then app4.
scheduler.getQueueManager().getLeafQueue(fifoQueue,true).setPolicy(SchedulingPolicy.parse("fifo"));
scheduler.update();
NodeUpdateSchedulerEvent updateEvent1=new NodeUpdateSchedulerEvent(node1);
NodeUpdateSchedulerEvent updateEvent2=new NodeUpdateSchedulerEvent(node2);
// 8 rounds x 2 heartbeats = 16 update events; check after every 2nd round
// (i.e. after 4, 8, 12 and 16 updates, as the javadoc states).
for (int i=0; i < 8; i++) {
scheduler.handle(updateEvent1);
scheduler.handle(updateEvent2);
if ((i + 1) % 2 == 0) {
String ERR="Wrong number of assigned containers after " + (i + 1) + " updates";
if (i < 4) {
// First half: the fifo queue's share goes entirely to app1 ...
assertEquals(ERR,(i + 1),app1.getLiveContainers().size());
assertEquals(ERR,0,app4.getLiveContainers().size());
}
else {
// ... second half: app1 is satisfied (4), app4 starts filling up.
assertEquals(ERR,4,app1.getLiveContainers().size());
assertEquals(ERR,(i - 3),app4.getLiveContainers().size());
}
// The fair children each get half of their parent's allocations.
assertEquals(ERR,(i + 1) / 2,app2.getLiveContainers().size());
assertEquals(ERR,(i + 1) / 2,app3.getLiveContainers().size());
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * The lowest common ancestor of two leaves nested two levels under queue1
 * (root.queue1.a.a1 and root.queue1.b.b1) is queue1 itself.
 *
 * Fix: assertEquals arguments were in (actual, expected) order; JUnit's
 * contract is (expected, actual), so failure messages were inverted.
 */
@Test
public void testLowestCommonAncestorDeeperHierarchy() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  FSQueue aQueue = mock(FSLeafQueue.class);
  FSQueue bQueue = mock(FSLeafQueue.class);
  FSQueue a1Queue = mock(FSLeafQueue.class);
  FSQueue b1Queue = mock(FSLeafQueue.class);
  when(a1Queue.getName()).thenReturn("root.queue1.a.a1");
  when(b1Queue.getName()).thenReturn("root.queue1.b.b1");
  when(aQueue.getChildQueues()).thenReturn(Arrays.asList(a1Queue));
  when(bQueue.getChildQueues()).thenReturn(Arrays.asList(b1Queue));

  QueueManager queueManager = scheduler.getQueueManager();
  FSParentQueue queue1 = queueManager.getParentQueue("queue1", true);
  queue1.addChildQueue(aQueue);
  queue1.addChildQueue(bQueue);

  FSQueue ancestorQueue = scheduler.findLowestCommonAncestorQueue(a1Queue, b1Queue);
  assertEquals(queue1, ancestorQueue);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * A node-level request for node1 must be satisfied only by node1: ten
 * heartbeats from node2 allocate nothing, one from node1 allocates the
 * container.
 */
@Test
public void testStrictLocality() throws IOException {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  RMNode firstNode = MockNodes.newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1");
  scheduler.handle(new NodeAddedSchedulerEvent(firstNode));
  RMNode secondNode = MockNodes.newNodeInfo(1, Resources.createResource(1024), 2, "127.0.0.2");
  scheduler.handle(new NodeAddedSchedulerEvent(secondNode));

  ApplicationAttemptId attemptId = createSchedulingRequest(1024, "queue1", "user1", 0);
  ResourceRequest hostAsk = createResourceRequest(1024, firstNode.getHostName(), 1, 1, true);
  ResourceRequest rackAsk = createResourceRequest(1024, firstNode.getRackName(), 1, 1, false);
  ResourceRequest anyAsk = createResourceRequest(1024, ResourceRequest.ANY, 1, 1, false);
  createSchedulingRequestExistingApplication(hostAsk, attemptId);
  createSchedulingRequestExistingApplication(rackAsk, attemptId);
  createSchedulingRequestExistingApplication(anyAsk, attemptId);
  scheduler.update();

  NodeUpdateSchedulerEvent firstNodeUpdate = new NodeUpdateSchedulerEvent(firstNode);
  NodeUpdateSchedulerEvent secondNodeUpdate = new NodeUpdateSchedulerEvent(secondNode);
  FSAppAttempt attempt = scheduler.getSchedulerApp(attemptId);
  // Wrong-node heartbeats neither allocate nor reserve.
  for (int round = 0; round < 10; round++) {
    scheduler.handle(secondNodeUpdate);
    assertEquals(0, attempt.getLiveContainers().size());
    assertEquals(0, attempt.getReservedContainers().size());
  }
  scheduler.handle(firstNodeUpdate);
  assertEquals(1, attempt.getLiveContainers().size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * DRF scheduling over a queue hierarchy: root, queue1 and queue1.subqueue1
 * all run the DominantResourceFairnessPolicy on one 12288 MB / 12 vcore
 * node. The assertions pin the exact per-heartbeat allocation order across
 * the four apps.
 * NOTE(review): the Thread.sleep(3) calls appear intended to separate the
 * apps' start times (presumably a scheduling tie-breaker) — confirm.
 */
@Test public void testDRFHierarchicalQueues() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
RMNode node=MockNodes.newNodeInfo(1,BuilderUtils.newResource(12288,12),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node);
scheduler.handle(nodeEvent);
// Four apps with differing memory/vcore profiles across three queues.
ApplicationAttemptId appAttId1=createSchedulingRequest(3074,1,"queue1.subqueue1","user1",2);
Thread.sleep(3);
FSAppAttempt app1=scheduler.getSchedulerApp(appAttId1);
ApplicationAttemptId appAttId2=createSchedulingRequest(1024,3,"queue1.subqueue1","user1",2);
Thread.sleep(3);
FSAppAttempt app2=scheduler.getSchedulerApp(appAttId2);
ApplicationAttemptId appAttId3=createSchedulingRequest(2048,2,"queue1.subqueue2","user1",2);
Thread.sleep(3);
FSAppAttempt app3=scheduler.getSchedulerApp(appAttId3);
ApplicationAttemptId appAttId4=createSchedulingRequest(1024,2,"queue2","user1",2);
Thread.sleep(3);
FSAppAttempt app4=scheduler.getSchedulerApp(appAttId4);
// Apply DRF at the root, at queue1, and at queue1.subqueue1 (queue2 and
// queue1.subqueue2 keep the default policy).
DominantResourceFairnessPolicy drfPolicy=new DominantResourceFairnessPolicy();
drfPolicy.initialize(scheduler.getClusterResource());
scheduler.getQueueManager().getQueue("root").setPolicy(drfPolicy);
scheduler.getQueueManager().getQueue("queue1").setPolicy(drfPolicy);
scheduler.getQueueManager().getQueue("queue1.subqueue1").setPolicy(drfPolicy);
scheduler.update();
// One container is handed out per heartbeat; assert the recipient each time.
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node);
scheduler.handle(updateEvent);
Assert.assertEquals(1,app1.getLiveContainers().size());
scheduler.handle(updateEvent);
Assert.assertEquals(1,app4.getLiveContainers().size());
scheduler.handle(updateEvent);
Assert.assertEquals(2,app4.getLiveContainers().size());
scheduler.handle(updateEvent);
Assert.assertEquals(1,app3.getLiveContainers().size());
scheduler.handle(updateEvent);
Assert.assertEquals(2,app3.getLiveContainers().size());
scheduler.handle(updateEvent);
Assert.assertEquals(1,app2.getLiveContainers().size());
scheduler.handle(updateEvent);
// Final state: app1 and app2 hold one container each, app3 and app4 two.
Assert.assertEquals(1,app1.getLiveContainers().size());
Assert.assertEquals(1,app2.getLiveContainers().size());
Assert.assertEquals(2,app3.getLiveContainers().size());
Assert.assertEquals(2,app4.getLiveContainers().size());
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * getAppsInQueue must resolve both short and fully-qualified queue names,
 * resolve the per-user queue, and aggregate apps from child queues when
 * given a parent queue name.
 *
 * Fix: raw {@code List}/{@code Set} replaced with parameterized types —
 * getAppsInQueue returns {@code List<ApplicationAttemptId>}.
 */
@Test
public void testGetAppsInQueue() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  ApplicationAttemptId appAttId1 = createSchedulingRequest(1024, 1, "queue1.subqueue1", "user1");
  ApplicationAttemptId appAttId2 = createSchedulingRequest(1024, 1, "queue1.subqueue2", "user1");
  ApplicationAttemptId appAttId3 = createSchedulingRequest(1024, 1, "default", "user1");

  List<ApplicationAttemptId> apps = scheduler.getAppsInQueue("queue1.subqueue1");
  assertEquals(1, apps.size());
  assertEquals(appAttId1, apps.get(0));
  // The fully-qualified name resolves to the same leaf queue.
  apps = scheduler.getAppsInQueue("root.queue1.subqueue1");
  assertEquals(1, apps.size());
  assertEquals(appAttId1, apps.get(0));

  // The app submitted to "default" is found under the user's queue.
  apps = scheduler.getAppsInQueue("user1");
  assertEquals(1, apps.size());
  assertEquals(appAttId3, apps.get(0));
  apps = scheduler.getAppsInQueue("root.user1");
  assertEquals(1, apps.size());
  assertEquals(appAttId3, apps.get(0));

  // A parent queue aggregates its children's apps; order is unspecified.
  apps = scheduler.getAppsInQueue("queue1");
  Assert.assertEquals(2, apps.size());
  Set<ApplicationAttemptId> appAttIds = Sets.newHashSet(apps.get(0), apps.get(1));
  assertTrue(appAttIds.contains(appAttId1));
  assertTrue(appAttIds.contains(appAttId2));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A node with 2048 MB but a single vcore can host only one container; a
 * second heartbeat must not allocate more even though memory remains.
 */
@Test
public void testNoMoreCpuOnNode() throws IOException {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  RMNode node = MockNodes.newNodeInfo(1, Resources.createResource(2048, 1), 1, "127.0.0.1");
  scheduler.handle(new NodeAddedSchedulerEvent(node));

  // Two 1024 MB / 1 vcore containers requested; only one can ever fit.
  ApplicationAttemptId attemptId = createSchedulingRequest(1024, 1, "default", "user1", 2);
  FSAppAttempt attempt = scheduler.getSchedulerApp(attemptId);
  scheduler.update();

  NodeUpdateSchedulerEvent heartbeat = new NodeUpdateSchedulerEvent(node);
  scheduler.handle(heartbeat);
  assertEquals(1, attempt.getLiveContainers().size());
  scheduler.handle(heartbeat);
  assertEquals(1, attempt.getLiveContainers().size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * The lowest common ancestor of two leaves directly under root (root.a and
 * root.b) is the root queue.
 *
 * Fix: assertEquals arguments were in (actual, expected) order; JUnit's
 * contract is (expected, actual), so failure messages were inverted.
 */
@Test
public void testLowestCommonAncestorRootParent() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  FSLeafQueue aQueue = mock(FSLeafQueue.class);
  FSLeafQueue bQueue = mock(FSLeafQueue.class);
  when(aQueue.getName()).thenReturn("root.a");
  when(bQueue.getName()).thenReturn("root.b");

  QueueManager queueManager = scheduler.getQueueManager();
  FSParentQueue queue1 = queueManager.getParentQueue("root", false);
  queue1.addChildQueue(aQueue);
  queue1.addChildQueue(bQueue);

  FSQueue ancestorQueue = scheduler.findLowestCommonAncestorQueue(aQueue, bQueue);
  assertEquals(queue1, ancestorQueue);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * An application submitted by a user not in queue1's submit ACL must be
 * rejected: the attempt is refused and the application finishes FAILED.
 */
@SuppressWarnings("unchecked")
@Test
public void testNotAllowSubmitApplication() throws Exception {
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
  PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
  // NOTE(review): the XML literals were garbled in this copy of the file
  // (only "userallow" survived); reconstructed so root admits nobody and
  // queue1 admits only "userallow".
  out.println("<?xml version=\"1.0\"?>");
  out.println("<allocations>");
  out.println("<queue name=\"root\">");
  out.println("  <aclSubmitApps> </aclSubmitApps>");
  out.println("  <aclAdministerApps> </aclAdministerApps>");
  out.println("  <queue name=\"queue1\">");
  out.println("    <aclSubmitApps>userallow</aclSubmitApps>");
  out.println("    <aclAdministerApps>userallow</aclAdministerApps>");
  out.println("  </queue>");
  out.println("</queue>");
  out.println("</allocations>");
  out.close();

  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  int appId = this.APP_ID++;
  String user = "usernotallow";
  String queue = "queue1";
  ApplicationId applicationId = MockApps.newAppID(appId);
  String name = MockApps.newAppName();
  ApplicationMasterService masterService =
      new ApplicationMasterService(resourceManager.getRMContext(), scheduler);
  ApplicationSubmissionContext submissionContext = new ApplicationSubmissionContextPBImpl();
  ContainerLaunchContext clc =
      BuilderUtils.newContainerLaunchContext(null, null, null, null, null, null);
  submissionContext.setApplicationId(applicationId);
  submissionContext.setAMContainerSpec(clc);
  RMApp application = new RMAppImpl(applicationId, resourceManager.getRMContext(), conf,
      name, user, queue, submissionContext, scheduler, masterService,
      System.currentTimeMillis(), "YARN", null);
  resourceManager.getRMContext().getRMApps().putIfAbsent(applicationId, application);
  application.handle(new RMAppEvent(applicationId, RMAppEventType.START));

  // Poll (up to 20 x 100 ms) for the app to reach SUBMITTED.
  final int MAX_TRIES = 20;
  int numTries = 0;
  while (!application.getState().equals(RMAppState.SUBMITTED) && numTries < MAX_TRIES) {
    try {
      Thread.sleep(100);
    } catch (InterruptedException ex) {
      ex.printStackTrace();
    }
    numTries++;
  }
  assertEquals("The application doesn't reach SUBMITTED.",
      RMAppState.SUBMITTED, application.getState());

  // Adding the attempt triggers the ACL check that rejects this user.
  ApplicationAttemptId attId =
      ApplicationAttemptId.newInstance(applicationId, this.ATTEMPT_ID++);
  scheduler.addApplication(attId.getApplicationId(), queue, user, false);

  // Poll (up to 20 x 100 ms) for the rejected app to finish.
  numTries = 0;
  while (application.getFinishTime() == 0 && numTries < MAX_TRIES) {
    try {
      Thread.sleep(100);
    } catch (InterruptedException ex) {
      ex.printStackTrace();
    }
    numTries++;
  }
  assertEquals(FinalApplicationStatus.FAILED, application.getFinalApplicationStatus());
}
APIUtilityVerifier NullVerifier
/**
 * Only a user matching queue1's submit ACL gets a scheduler app created;
 * any other user's submission is rejected (no FSAppAttempt exists).
 */
@Test
public void testAclSubmitApplication() throws Exception {
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
  PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
  // NOTE(review): the XML literals were garbled in this copy of the file
  // (only "norealuserhasthisname" survived); reconstructed so root admits
  // nobody and queue1 admits only "norealuserhasthisname".
  out.println("<?xml version=\"1.0\"?>");
  out.println("<allocations>");
  out.println("<queue name=\"root\">");
  out.println("  <aclSubmitApps> </aclSubmitApps>");
  out.println("  <aclAdministerApps> </aclAdministerApps>");
  out.println("  <queue name=\"queue1\">");
  out.println("    <aclSubmitApps>norealuserhasthisname</aclSubmitApps>");
  out.println("    <aclAdministerApps>norealuserhasthisname</aclAdministerApps>");
  out.println("  </queue>");
  out.println("</queue>");
  out.println("</allocations>");
  out.close();

  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  ApplicationAttemptId attId1 =
      createSchedulingRequest(1024, "queue1", "norealuserhasthisname", 1);
  ApplicationAttemptId attId2 =
      createSchedulingRequest(1024, "queue1", "norealuserhasthisname2", 1);

  FSAppAttempt app1 = scheduler.getSchedulerApp(attId1);
  assertNotNull("The application was not allowed", app1);
  FSAppAttempt app2 = scheduler.getSchedulerApp(attId2);
  assertNull("The application was allowed", app2);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * If we update our ask to strictly request a node, it doesn't make sense to
 * keep a reservation on another: once the node-level request is dropped and
 * the ask shrinks, the reservation must disappear.
 */
@Test
public void testReservationsStrictLocality() throws IOException {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1");
  RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(1024), 2, "127.0.0.2");
  // Only node1 is registered with the scheduler; node2 is just an ask target.
  scheduler.handle(new NodeAddedSchedulerEvent(node1));

  ApplicationAttemptId attemptId = createSchedulingRequest(1024, "queue1", "user1", 0);
  FSAppAttempt attempt = scheduler.getSchedulerApp(attemptId);

  ResourceRequest hostAsk = createResourceRequest(1024, node2.getHostName(), 1, 2, true);
  ResourceRequest rackAsk = createResourceRequest(1024, "rack1", 1, 2, true);
  ResourceRequest anyAsk = createResourceRequest(1024, ResourceRequest.ANY, 1, 2, false);
  createSchedulingRequestExistingApplication(hostAsk, attemptId);
  createSchedulingRequestExistingApplication(rackAsk, attemptId);
  createSchedulingRequestExistingApplication(anyAsk, attemptId);
  scheduler.update();

  NodeUpdateSchedulerEvent node1Heartbeat = new NodeUpdateSchedulerEvent(node1);
  // First heartbeat: one container runs; second: a reservation is created.
  scheduler.handle(node1Heartbeat);
  assertEquals(1, attempt.getLiveContainers().size());
  scheduler.handle(node1Heartbeat);
  assertEquals(1, attempt.getReservedContainers().size());

  // Drop the node-level ask and shrink to a single container.
  rackAsk = createResourceRequest(1024, "rack1", 1, 1, false);
  anyAsk = createResourceRequest(1024, ResourceRequest.ANY, 1, 1, false);
  scheduler.allocate(attemptId, Arrays.asList(rackAsk, anyAsk), new ArrayList(), null, null);
  scheduler.handle(node1Heartbeat);
  assertEquals(0, attempt.getReservedContainers().size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Two apps on one queue, one app on another: with DRF at the root and at
 * queue1, allocations interleave between the queues in the asserted order.
 */
@Test
public void testBasicDRFWithQueues() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  RMNode node = MockNodes.newNodeInfo(1, BuilderUtils.newResource(8192, 7), 1, "127.0.0.1");
  scheduler.handle(new NodeAddedSchedulerEvent(node));

  ApplicationAttemptId attempt1 = createSchedulingRequest(3072, 1, "queue1", "user1", 2);
  FSAppAttempt app1 = scheduler.getSchedulerApp(attempt1);
  ApplicationAttemptId attempt2 = createSchedulingRequest(2048, 2, "queue1", "user1", 2);
  FSAppAttempt app2 = scheduler.getSchedulerApp(attempt2);
  ApplicationAttemptId attempt3 = createSchedulingRequest(1024, 2, "queue2", "user1", 2);
  FSAppAttempt app3 = scheduler.getSchedulerApp(attempt3);

  DominantResourceFairnessPolicy drfPolicy = new DominantResourceFairnessPolicy();
  drfPolicy.initialize(scheduler.getClusterResource());
  scheduler.getQueueManager().getQueue("root").setPolicy(drfPolicy);
  scheduler.getQueueManager().getQueue("queue1").setPolicy(drfPolicy);
  scheduler.update();

  // One container per heartbeat; assert which app receives each one.
  NodeUpdateSchedulerEvent heartbeat = new NodeUpdateSchedulerEvent(node);
  scheduler.handle(heartbeat);
  Assert.assertEquals(1, app1.getLiveContainers().size());
  scheduler.handle(heartbeat);
  Assert.assertEquals(1, app3.getLiveContainers().size());
  scheduler.handle(heartbeat);
  Assert.assertEquals(2, app3.getLiveContainers().size());
  scheduler.handle(heartbeat);
  Assert.assertEquals(1, app2.getLiveContainers().size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * {@code maxAssign} bounds how many containers a single heartbeat may hand
 * out: 2 when maxAssign=2, all remaining 8 when unlimited (-1).
 */
@Test(timeout = 3000)
public void testMaxAssign() throws Exception {
  conf.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE, true);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  RMNode node = MockNodes.newNodeInfo(1, Resources.createResource(16384, 16), 0, "127.0.0.1");
  NodeUpdateSchedulerEvent heartbeat = new NodeUpdateSchedulerEvent(node);
  scheduler.handle(new NodeAddedSchedulerEvent(node));

  ApplicationAttemptId attemptId = createSchedulingRequest(1024, "root.default", "user", 8);
  FSAppAttempt attempt = scheduler.getSchedulerApp(attemptId);

  scheduler.maxAssign = 2;
  scheduler.update();
  scheduler.handle(heartbeat);
  assertEquals("Incorrect number of containers allocated", 2,
      attempt.getLiveContainers().size());

  scheduler.maxAssign = -1;
  scheduler.update();
  scheduler.handle(heartbeat);
  assertEquals("Incorrect number of containers allocated", 8,
      attempt.getLiveContainers().size());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * With RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME set, node-level requests are
 * keyed by host:port, so two nodes sharing one host are distinguished:
 * heartbeats from the wrong port allocate nothing.
 */
@Test(timeout = 30000)
public void testHostPortNodeName() throws Exception {
  conf.setBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, true);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  // Two nodes on the same host, differing only in port.
  RMNode firstNode = MockNodes.newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1", 1);
  scheduler.handle(new NodeAddedSchedulerEvent(firstNode));
  RMNode secondNode = MockNodes.newNodeInfo(1, Resources.createResource(1024), 2, "127.0.0.1", 2);
  scheduler.handle(new NodeAddedSchedulerEvent(secondNode));

  ApplicationAttemptId attemptId = createSchedulingRequest(1024, "queue1", "user1", 0);
  ResourceRequest hostPortAsk = createResourceRequest(1024,
      firstNode.getNodeID().getHost() + ":" + firstNode.getNodeID().getPort(), 1, 1, true);
  ResourceRequest rackAsk = createResourceRequest(1024, firstNode.getRackName(), 1, 1, false);
  ResourceRequest anyAsk = createResourceRequest(1024, ResourceRequest.ANY, 1, 1, false);
  createSchedulingRequestExistingApplication(hostPortAsk, attemptId);
  createSchedulingRequestExistingApplication(rackAsk, attemptId);
  createSchedulingRequestExistingApplication(anyAsk, attemptId);
  scheduler.update();

  NodeUpdateSchedulerEvent firstNodeUpdate = new NodeUpdateSchedulerEvent(firstNode);
  NodeUpdateSchedulerEvent secondNodeUpdate = new NodeUpdateSchedulerEvent(secondNode);
  FSAppAttempt attempt = scheduler.getSchedulerApp(attemptId);
  // Same host but wrong port: nothing allocated, nothing reserved.
  for (int round = 0; round < 10; round++) {
    scheduler.handle(secondNodeUpdate);
    assertEquals(0, attempt.getLiveContainers().size());
    assertEquals(0, attempt.getReservedContainers().size());
  }
  scheduler.handle(firstNodeUpdate);
  assertEquals(1, attempt.getLiveContainers().size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Blacklist additions/removals passed through allocate() must be reflected
 * in the app's blacklist, and a blacklisted host must not receive the app's
 * containers until the host is removed from the blacklist.
 *
 * Fix: the raw-typed {@code Collections.emptyList()} calls and raw
 * {@code List} were replaced with parameterized forms so allocate()'s
 * arguments are type-checked.
 */
@SuppressWarnings("resource")
@Test
public void testBlacklistNodes() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  final int GB = 1024;
  String host = "127.0.0.1";
  RMNode node = MockNodes.newNodeInfo(1, Resources.createResource(16 * GB, 16), 0, host);
  NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
  scheduler.handle(nodeEvent);

  ApplicationAttemptId appAttemptId = createSchedulingRequest(GB, "root.default", "user", 1);
  FSAppAttempt app = scheduler.getSchedulerApp(appAttemptId);

  // Add the only host to the blacklist, then remove it again.
  scheduler.allocate(appAttemptId, Collections.<ResourceRequest>emptyList(),
      Collections.<ContainerId>emptyList(), Collections.singletonList(host), null);
  assertTrue(app.isBlacklisted(host));
  scheduler.allocate(appAttemptId, Collections.<ResourceRequest>emptyList(),
      Collections.<ContainerId>emptyList(), null, Collections.singletonList(host));
  assertFalse(scheduler.getSchedulerApp(appAttemptId).isBlacklisted(host));

  List<ResourceRequest> update = Arrays.asList(
      createResourceRequest(GB, node.getHostName(), 1, 0, true));
  // While blacklisted, the host's heartbeat allocates nothing for the app.
  scheduler.allocate(appAttemptId, update, Collections.<ContainerId>emptyList(),
      Collections.singletonList(host), null);
  assertTrue(app.isBlacklisted(host));
  scheduler.update();
  scheduler.handle(updateEvent);
  assertEquals("Incorrect number of containers allocated", 0,
      app.getLiveContainers().size());

  // After removal from the blacklist, allocation succeeds.
  scheduler.allocate(appAttemptId, update, Collections.<ContainerId>emptyList(),
      null, Collections.singletonList(host));
  assertFalse(app.isBlacklisted(host));
  createSchedulingRequest(GB, "root.default", "user", 1);
  scheduler.update();
  scheduler.handle(updateEvent);
  assertEquals("Incorrect number of containers allocated", 1,
      app.getLiveContainers().size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * The lowest common ancestor of two leaves directly under a non-root parent
 * (root.queue1.a and root.queue1.b) is that parent, queue1.
 *
 * Fix: assertEquals arguments were in (actual, expected) order; JUnit's
 * contract is (expected, actual), so failure messages were inverted.
 */
@Test
public void testLowestCommonAncestorForNonRootParent() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  FSLeafQueue aQueue = mock(FSLeafQueue.class);
  FSLeafQueue bQueue = mock(FSLeafQueue.class);
  when(aQueue.getName()).thenReturn("root.queue1.a");
  when(bQueue.getName()).thenReturn("root.queue1.b");

  QueueManager queueManager = scheduler.getQueueManager();
  FSParentQueue queue1 = queueManager.getParentQueue("queue1", true);
  queue1.addChildQueue(aQueue);
  queue1.addChildQueue(bQueue);

  FSQueue ancestorQueue = scheduler.findLowestCommonAncestorQueue(aQueue, bQueue);
  assertEquals(queue1, ancestorQueue);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Same as testMaxAssign, but with the minimum allocation set to 0 MB and
 * zero-memory (vcore-only) container requests: maxAssign must still bound
 * the per-heartbeat assignments.
 */
@Test(timeout = 3000)
public void testMaxAssignWithZeroMemoryContainers() throws Exception {
  conf.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE, true);
  conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  RMNode node = MockNodes.newNodeInfo(1, Resources.createResource(16384, 16), 0, "127.0.0.1");
  NodeUpdateSchedulerEvent heartbeat = new NodeUpdateSchedulerEvent(node);
  scheduler.handle(new NodeAddedSchedulerEvent(node));

  // Eight 0 MB / 1 vcore containers requested.
  ApplicationAttemptId attemptId = createSchedulingRequest(0, 1, "root.default", "user", 8);
  FSAppAttempt attempt = scheduler.getSchedulerApp(attemptId);

  scheduler.maxAssign = 2;
  scheduler.update();
  scheduler.handle(heartbeat);
  assertEquals("Incorrect number of containers allocated", 2,
      attempt.getLiveContainers().size());

  scheduler.maxAssign = -1;
  scheduler.update();
  scheduler.handle(heartbeat);
  assertEquals("Incorrect number of containers allocated", 8,
      attempt.getLiveContainers().size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A container request (2048 MB) larger than the node's total capacity
 * (1024 MB) must be neither allocated nor reserved, while a later request
 * that fits is allocated normally on the same node.
 */
@Test public void testReservationThatDoesntFit() throws IOException {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(1024),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
ApplicationAttemptId attId=createSchedulingRequest(2048,"queue1","user1",1);
scheduler.update();
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node1);
scheduler.handle(updateEvent);
FSAppAttempt app=scheduler.getSchedulerApp(attId);
// The oversized ask can never fit on this node: no container, no reservation.
assertEquals(0,app.getLiveContainers().size());
assertEquals(0,app.getReservedContainers().size());
// A 1024 MB ask fits exactly and is allocated on the next heartbeat.
createSchedulingRequestExistingApplication(1024,2,attId);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals(1,app.getLiveContainers().size());
assertEquals(0,app.getReservedContainers().size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * With USER_AS_DEFAULT_QUEUE enabled, an app submitted to "default" is
 * re-routed to a queue named after the submitting user, while an app that
 * names a non-default queue keeps that queue. assignToQueue must also
 * write the resolved queue back onto the RMApp.
 */
@Test public void testAssignToQueue() throws Exception {
conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE,"true");
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
RMApp rmApp1=new MockRMApp(0,0,RMAppState.NEW);
RMApp rmApp2=new MockRMApp(1,1,RMAppState.NEW);
// "default" + user "asterix" -> root.asterix; explicit queue name is honored.
FSLeafQueue queue1=scheduler.assignToQueue(rmApp1,"default","asterix");
FSLeafQueue queue2=scheduler.assignToQueue(rmApp2,"notdefault","obelix");
// The RMApp's queue and the returned leaf queue must agree.
assertEquals(rmApp1.getQueue(),queue1.getName());
assertEquals("root.asterix",rmApp1.getQueue());
assertEquals(rmApp2.getQueue(),queue2.getName());
assertEquals("root.notdefault",rmApp2.getQueue());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * When an app already holds the whole node at priority 2 and then asks again
 * at priority 2 and at priority 1, any reservation must be made at the
 * priority of the existing outstanding request (2). Releasing the live
 * container must then let the reserved priority-2 ask take the freed space.
 */
@Test(timeout=5000) public void testReservationWhileMultiplePriorities() throws IOException {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(1024,4),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
ApplicationAttemptId attId=createSchedulingRequest(1024,4,"queue1","user1",1,2);
scheduler.update();
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node1);
scheduler.handle(updateEvent);
FSAppAttempt app=scheduler.getSchedulerApp(attId);
// First request (priority 2) fills the 1024 MB / 4 core node completely.
assertEquals(1,app.getLiveContainers().size());
ContainerId containerId=scheduler.getSchedulerApp(attId).getLiveContainers().iterator().next().getContainerId();
// Second priority-2 ask cannot fit: node is full, no new live container.
createSchedulingRequestExistingApplication(1024,4,2,attId);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals(1,app.getLiveContainers().size());
assertEquals(0,scheduler.getRootQueueMetrics().getAvailableMB());
assertEquals(0,scheduler.getRootQueueMetrics().getAvailableVirtualCores());
// A priority-1 ask must not change which priority gets reserved.
createSchedulingRequestExistingApplication(1024,4,1,attId);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals(1,app.getLiveContainers().size());
for ( RMContainer container : app.getReservedContainers()) {
assertEquals(2,container.getReservedPriority().getPriority());
}
// Release the live container; the whole node becomes available again.
scheduler.allocate(attId,new ArrayList(),Arrays.asList(containerId),null,null);
assertEquals(1024,scheduler.getRootQueueMetrics().getAvailableMB());
assertEquals(4,scheduler.getRootQueueMetrics().getAvailableVirtualCores());
// The freed space must go to the reserved priority-2 request.
scheduler.update();
scheduler.handle(updateEvent);
Collection liveContainers=app.getLiveContainers();
assertEquals(1,liveContainers.size());
for ( RMContainer liveContainer : liveContainers) {
Assert.assertEquals(2,liveContainer.getContainer().getPriority().getPriority());
}
assertEquals(0,scheduler.getRootQueueMetrics().getAvailableMB());
assertEquals(0,scheduler.getRootQueueMetrics().getAvailableVirtualCores());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Moving a runnable app between queues must transfer its resource usage,
 * runnable-app membership, runnable-app counts, and (after the next update)
 * its demand from the old queue to the target queue, leaving the root-level
 * runnable count unchanged.
 */
@Test public void testMoveRunnableApp() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
QueueManager queueMgr=scheduler.getQueueManager();
FSLeafQueue oldQueue=queueMgr.getLeafQueue("queue1",true);
FSLeafQueue targetQueue=queueMgr.getLeafQueue("queue2",true);
// App asks for 3 x 1024 MB; the 1024 MB node can satisfy only one.
ApplicationAttemptId appAttId=createSchedulingRequest(1024,1,"queue1","user1",3);
ApplicationId appId=appAttId.getApplicationId();
RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(1024));
NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node);
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node);
scheduler.handle(nodeEvent);
scheduler.handle(updateEvent);
assertEquals(Resource.newInstance(1024,1),oldQueue.getResourceUsage());
scheduler.update();
assertEquals(Resource.newInstance(3072,3),oldQueue.getDemand());
// Move the app while it is runnable and holds a live container.
scheduler.moveApplication(appId,"queue2");
FSAppAttempt app=scheduler.getSchedulerApp(appAttId);
assertSame(targetQueue,app.getQueue());
assertFalse(oldQueue.getRunnableAppSchedulables().contains(app));
assertTrue(targetQueue.getRunnableAppSchedulables().contains(app));
// Usage and runnable counts follow the app immediately.
assertEquals(Resource.newInstance(0,0),oldQueue.getResourceUsage());
assertEquals(Resource.newInstance(1024,1),targetQueue.getResourceUsage());
assertEquals(0,oldQueue.getNumRunnableApps());
assertEquals(1,targetQueue.getNumRunnableApps());
assertEquals(1,queueMgr.getRootQueue().getNumRunnableApps());
// Demand follows on the next scheduler update.
scheduler.update();
assertEquals(Resource.newInstance(0,0),oldQueue.getDemand());
assertEquals(Resource.newInstance(3072,3),targetQueue.getDemand());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * With the default queue maxAMShare, an AM whose resource fits the share
 * (queue1) must launch, while an AM that would exceed the share (queue2)
 * must be held back and contribute nothing to the queue's AM usage.
 * NOTE(review): the out.println(...) payloads look like the allocation-file
 * XML was stripped from this copy of the test — confirm against upstream.
 */
@Test public void testQueueMaxAMShareDefault() throws Exception {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("");
out.println("");
out.println("");
out.println(" ");
out.println("");
out.println("1.0 ");
out.println(" ");
out.println("");
out.println(" ");
out.println("");
out.println(" ");
out.println("");
out.println(" ");
out.println(" ");
out.close();
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(8192,20),0,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node);
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node);
scheduler.handle(nodeEvent);
scheduler.update();
// With no apps submitted yet, every queue's fair share starts at zero.
FSLeafQueue queue1=scheduler.getQueueManager().getLeafQueue("queue1",true);
assertEquals("Queue queue1's fair share should be 0",0,queue1.getFairShare().getMemory());
FSLeafQueue queue2=scheduler.getQueueManager().getLeafQueue("queue2",true);
assertEquals("Queue queue2's fair share should be 0",0,queue2.getFairShare().getMemory());
FSLeafQueue queue3=scheduler.getQueueManager().getLeafQueue("queue3",true);
assertEquals("Queue queue3's fair share should be 0",0,queue3.getFairShare().getMemory());
FSLeafQueue queue4=scheduler.getQueueManager().getLeafQueue("queue4",true);
assertEquals("Queue queue4's fair share should be 0",0,queue4.getFairShare().getMemory());
FSLeafQueue queue5=scheduler.getQueueManager().getLeafQueue("queue5",true);
assertEquals("Queue queue5's fair share should be 0",0,queue5.getFairShare().getMemory());
// Occupy other queues so queue1/queue2 fair shares are constrained.
List queues=Arrays.asList("root.default","root.queue3","root.queue4","root.queue5");
for ( String queue : queues) {
createSchedulingRequest(1 * 1024,queue,"user1");
scheduler.update();
scheduler.handle(updateEvent);
}
Resource amResource1=Resource.newInstance(2048,1);
int amPriority=RMAppAttemptImpl.AM_CONTAINER_PRIORITY.getPriority();
// queue1's AM fits within its AM share and must run.
ApplicationAttemptId attId1=createAppAttemptId(1,1);
createApplicationWithAMResource(attId1,"queue1","test1",amResource1);
createSchedulingRequestExistingApplication(2048,1,amPriority,attId1);
FSAppAttempt app1=scheduler.getSchedulerApp(attId1);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application1's AM requests 2048 MB memory",2048,app1.getAMResource().getMemory());
assertEquals("Application1's AM should be running",1,app1.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory());
// queue2's AM would exceed its AM share and must be held back.
ApplicationAttemptId attId2=createAppAttemptId(2,1);
createApplicationWithAMResource(attId2,"queue2","test1",amResource1);
createSchedulingRequestExistingApplication(2048,1,amPriority,attId2);
FSAppAttempt app2=scheduler.getSchedulerApp(attId2);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application2's AM requests 2048 MB memory",2048,app2.getAMResource().getMemory());
assertEquals("Application2's AM should not be running",0,app2.getLiveContainers().size());
assertEquals("Queue2's AM resource usage should be 0 MB memory",0,queue2.getAmResourceUsage().getMemory());
}
APIUtilityVerifier EqualityVerifier
/**
 * With a nested-user-queue placement rule and SimpleGroupsMapping, an app
 * submitted by user1 to root.default must be placed in a per-user leaf
 * under the user's group parent queue (root.user1group.user1).
 * NOTE(review): the println payloads appear stripped of their XML content
 * in this copy — confirm the allocation file against upstream.
 */
@Test public void testNestedUserQueue() throws IOException {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,SimpleGroupsMapping.class,GroupMappingServiceProvider.class);
PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("");
out.println("");
out.println("");
out.println("1024mb,0vcores ");
out.println(" ");
out.println("");
out.println(" ");
out.println("");
out.println(" ");
out.println(" ");
out.println(" ");
out.println(" ");
out.println(" ");
out.close();
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
RMApp rmApp1=new MockRMApp(0,0,RMAppState.NEW);
// Placement resolves via the user's primary group from SimpleGroupsMapping.
FSLeafQueue user1Leaf=scheduler.assignToQueue(rmApp1,"root.default","user1");
assertEquals("root.user1group.user1",user1Leaf.getName());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * With FifoPolicy inside a queue, the first-submitted app must receive all
 * of its requested containers before the second app gets any.
 */
@Test(timeout=5000) public void testFifoWithinQueue() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(3072,3),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
// Two apps in the same queue, each asking for two 1024 MB containers.
ApplicationAttemptId attId1=createSchedulingRequest(1024,"queue1","user1",2);
ApplicationAttemptId attId2=createSchedulingRequest(1024,"queue1","user1",2);
FSAppAttempt app1=scheduler.getSchedulerApp(attId1);
FSAppAttempt app2=scheduler.getSchedulerApp(attId2);
FSLeafQueue queue1=scheduler.getQueueManager().getLeafQueue("queue1",true);
queue1.setPolicy(new FifoPolicy());
scheduler.update();
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node1);
// Each heartbeat assigns one container; FIFO order keeps app1 ahead.
scheduler.handle(updateEvent);
assertEquals(1,app1.getLiveContainers().size());
assertEquals(0,app2.getLiveContainers().size());
scheduler.handle(updateEvent);
assertEquals(2,app1.getLiveContainers().size());
assertEquals(0,app2.getLiveContainers().size());
// Only after app1 is fully satisfied does app2 get its first container.
scheduler.handle(updateEvent);
assertEquals(2,app1.getLiveContainers().size());
assertEquals(1,app2.getLiveContainers().size());
}
APIUtilityVerifier EqualityVerifier
/**
 * A nestedUserQueue rule falls through to default until its target parent
 * queue exists: the first placement lands in root.default, and once
 * "root.user1subgroup1" is registered as a parent queue, re-parsing the
 * same rules places user1 in a leaf under it.
 */
@Test public void testNestedUserQueueSecondaryGroup() throws Exception {
  StringBuilder rulesXml = new StringBuilder();
  rulesXml.append("");
  rulesXml.append(" ");
  rulesXml.append(" ");
  rulesXml.append(" ");
  rulesXml.append(" ");
  rulesXml.append(" ");
  String rules = rulesXml.toString();
  QueuePlacementPolicy placement = parse(rules);
  assertEquals("root.default", placement.assignAppToQueue("root.default", "user1"));
  // Register the secondary-group parent queue and re-parse the same rules.
  configuredQueues.get(FSQueueType.PARENT).add("root.user1subgroup1");
  placement = parse(rules);
  assertEquals("root.user1subgroup1.user1", placement.assignAppToQueue("root.default", "user1"));
}
APIUtilityVerifier EqualityVerifier
/**
 * A default rule carrying an explicit queue attribute must send apps to
 * that configured leaf queue rather than to root.default.
 */
@Test public void testDefaultRuleWithQueueAttribute() throws Exception {
  configuredQueues.get(FSQueueType.LEAF).add("root.someDefaultQueue");
  StringBuilder rulesXml = new StringBuilder();
  rulesXml.append("");
  rulesXml.append(" ");
  rulesXml.append(" ");
  rulesXml.append(" ");
  QueuePlacementPolicy placement = parse(rulesXml.toString());
  assertEquals("root.someDefaultQueue", placement.assignAppToQueue("root.default", "user1"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * specified-then-reject policy: an app that names a queue is placed there;
 * an app that falls through to the reject rule gets no queue (null).
 */
@Test public void testSpecifiedThenReject() throws Exception {
  StringBuilder rulesXml = new StringBuilder();
  rulesXml.append("");
  rulesXml.append(" ");
  rulesXml.append(" ");
  rulesXml.append(" ");
  QueuePlacementPolicy placement = parse(rulesXml.toString());
  assertEquals("root.specifiedq", placement.assignAppToQueue("specifiedq", "someuser"));
  // The reject rule yields no assignment at all.
  assertEquals(null, placement.assignAppToQueue("default", "someuser"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * nestedUserQueue with create=false: placement falls through to default
 * until the user's group parent queue (and, in the second half, the user
 * leaf under it) already exists in the configured queues.
 */
@Test public void testNestedUserQueuePrimaryGroupNoCreate() throws Exception {
StringBuffer sb=new StringBuffer();
sb.append("");
sb.append(" ");
sb.append(" ");
sb.append(" ");
sb.append(" ");
sb.append(" ");
QueuePlacementPolicy policy=parse(sb.toString());
// Parent queue does not exist yet: fall through to default.
assertEquals("root.default",policy.assignAppToQueue("root.default","user1"));
configuredQueues.get(FSQueueType.PARENT).add("root.user1group");
policy=parse(sb.toString());
assertEquals("root.user1group.user1",policy.assignAppToQueue("root.default","user1"));
sb=new StringBuffer();
sb.append("");
sb.append(" ");
sb.append(" ");
sb.append(" ");
sb.append(" ");
sb.append(" ");
// NOTE(review): this assert still uses the policy parsed from the FIRST
// rule set — a parse(sb.toString()) before it may be missing; confirm
// against the upstream test.
assertEquals("root.default",policy.assignAppToQueue("root.default","user2"));
configuredQueues.get(FSQueueType.PARENT).add("root.user2group");
configuredQueues.get(FSQueueType.LEAF).add("root.user2group.user2");
policy=parse(sb.toString());
assertEquals("root.user2group.user2",policy.assignAppToQueue("root.default","user2"));
}
APIUtilityVerifier EqualityVerifier
/**
 * A nested default rule that targets a configured parent queue must place
 * the app in a per-user leaf under that parent (root.parentq.user1).
 */
@Test public void testNestedUserQueueDefaultRule() throws Exception {
  configuredQueues.get(FSQueueType.PARENT).add("root.parentq");
  StringBuilder rulesXml = new StringBuilder();
  rulesXml.append("");
  rulesXml.append(" ");
  rulesXml.append(" ");
  rulesXml.append(" ");
  rulesXml.append(" ");
  rulesXml.append(" ");
  rulesXml.append(" ");
  QueuePlacementPolicy placement = parse(rulesXml.toString());
  assertEquals("root.parentq.user1", placement.assignAppToQueue("root.default", "user1"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * With create=false on the user rule, only a pre-existing user leaf queue
 * is used; other users fall through to default. A specified queue always
 * wins regardless of user.
 */
@Test public void testNoCreate() throws Exception {
  StringBuilder rulesXml = new StringBuilder();
  rulesXml.append("");
  rulesXml.append(" ");
  rulesXml.append(" ");
  rulesXml.append(" ");
  rulesXml.append(" ");
  // Only "someuser" has an existing leaf queue.
  configuredQueues.get(FSQueueType.LEAF).add("root.someuser");
  QueuePlacementPolicy placement = parse(rulesXml.toString());
  assertEquals("root.specifiedq", placement.assignAppToQueue("specifiedq", "someuser"));
  assertEquals("root.someuser", placement.assignAppToQueue("default", "someuser"));
  assertEquals("root.specifiedq", placement.assignAppToQueue("specifiedq", "otheruser"));
  assertEquals("root.default", placement.assignAppToQueue("default", "otheruser"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A nestedUserQueue rule keyed on the specified queue must create the user
 * leaf under whichever configured parent queue the app names.
 */
@Test public void testNestedUserQueueSpecificRule() throws Exception {
  StringBuilder rulesXml = new StringBuilder();
  rulesXml.append("");
  rulesXml.append(" ");
  rulesXml.append(" ");
  rulesXml.append(" ");
  rulesXml.append(" ");
  rulesXml.append(" ");
  configuredQueues.get(FSQueueType.PARENT).add("root.parent1");
  configuredQueues.get(FSQueueType.PARENT).add("root.parent2");
  QueuePlacementPolicy placement = parse(rulesXml.toString());
  assertEquals("root.parent1.user1", placement.assignAppToQueue("root.parent1", "user1"));
  assertEquals("root.parent2.user2", placement.assignAppToQueue("root.parent2", "user2"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * specified-then-user policy: an app that names a queue keeps it; otherwise
 * the app is placed in a queue named after the submitting user.
 */
@Test public void testSpecifiedUserPolicy() throws Exception {
  StringBuilder rulesXml = new StringBuilder();
  rulesXml.append("");
  rulesXml.append(" ");
  rulesXml.append(" ");
  rulesXml.append(" ");
  QueuePlacementPolicy placement = parse(rulesXml.toString());
  assertEquals("root.specifiedq", placement.assignAppToQueue("specifiedq", "someuser"));
  assertEquals("root.someuser", placement.assignAppToQueue("default", "someuser"));
  assertEquals("root.otheruser", placement.assignAppToQueue("default", "otheruser"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * nestedUserQueue with a primaryGroup rule: user1 lands in a leaf under its
 * group parent; a specified existing leaf wins for user2; and when the
 * group queue exists only as a LEAF (user3), the rule cannot nest, so the
 * app falls back to root.default.
 */
@Test public void testNestedUserQueuePrimaryGroup() throws Exception {
  StringBuilder rulesXml = new StringBuilder();
  rulesXml.append("");
  rulesXml.append(" ");
  rulesXml.append(" ");
  rulesXml.append(" ");
  rulesXml.append(" ");
  rulesXml.append(" ");
  rulesXml.append(" ");
  QueuePlacementPolicy placement = parse(rulesXml.toString());
  assertEquals("root.user1group.user1", placement.assignAppToQueue("root.default", "user1"));
  // An existing specified leaf queue takes precedence for user2.
  configuredQueues.get(FSQueueType.LEAF).add("root.specifiedq");
  assertEquals("root.specifiedq", placement.assignAppToQueue("root.specifiedq", "user2"));
  // user3's group queue exists only as a LEAF, so nesting is impossible.
  configuredQueues.get(FSQueueType.LEAF).add("root.user3group");
  assertEquals("root.default", placement.assignAppToQueue("root.default", "user3"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Hosts passed through allocate()'s blacklist-additions list must show as
 * blacklisted on the attempt, and a subsequent removals list must clear
 * that state.
 */
@SuppressWarnings("resource") @Test public void testBlackListNodes() throws Exception {
Configuration conf=new Configuration();
conf.setClass(YarnConfiguration.RM_SCHEDULER,FifoScheduler.class,ResourceScheduler.class);
MockRM rm=new MockRM(conf);
rm.start();
FifoScheduler fs=(FifoScheduler)rm.getResourceScheduler();
String host="127.0.0.1";
RMNode node=MockNodes.newNodeInfo(0,MockNodes.newResource(4 * GB),1,host);
fs.handle(new NodeAddedSchedulerEvent(node));
ApplicationId appId=BuilderUtils.newApplicationId(100,1);
ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(appId,1);
SchedulerEvent appEvent=new AppAddedSchedulerEvent(appId,"default","user");
fs.handle(appEvent);
SchedulerEvent attemptEvent=new AppAttemptAddedSchedulerEvent(appAttemptId,false);
fs.handle(attemptEvent);
// Fourth argument is blacklist additions; fifth is blacklist removals.
fs.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),Collections.singletonList(host),null);
Assert.assertTrue(fs.getApplicationAttempt(appAttemptId).isBlacklisted(host));
fs.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),null,Collections.singletonList(host));
Assert.assertFalse(fs.getApplicationAttempt(appAttemptId).isBlacklisted(host));
rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Resizing a node via setResourceOption must not affect the scheduler until
 * the next node heartbeat; afterwards the available resource, queue
 * capacity, and container allocation must all reflect the new size.
 */
@Test(timeout=2000) public void testUpdateResourceOnNode() throws Exception {
  AsyncDispatcher dispatcher = new InlineDispatcher();
  Configuration conf = new Configuration();
  RMContainerTokenSecretManager containerTokenSecretManager = new RMContainerTokenSecretManager(conf);
  containerTokenSecretManager.rollMasterKey();
  NMTokenSecretManagerInRM nmTokenSecretManager = new NMTokenSecretManagerInRM(conf);
  nmTokenSecretManager.rollMasterKey();
  RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
  RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null, null, containerTokenSecretManager, nmTokenSecretManager, null, writer);
  // Anonymous subclass exposes the protected node map for reflective access below.
  FifoScheduler scheduler = new FifoScheduler() {
    @SuppressWarnings("unused") public Map getNodes() {
      return nodes;
    }
  };
  scheduler.setRMContext(rmContext);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(new Configuration(), rmContext);
  RMNode node0 = MockNodes.newNodeInfo(1, Resources.createResource(2048, 4), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node0);
  scheduler.handle(nodeEvent1);
  Method method = scheduler.getClass().getDeclaredMethod("getNodes");
  @SuppressWarnings("unchecked") Map schedulerNodes = (Map) method.invoke(scheduler);
  // JUnit convention: expected value first (several asserts below had the
  // arguments reversed in the original).
  assertEquals(1, schedulerNodes.values().size());
  // Shrink the node to 1024 MB; the scheduler must not notice until a heartbeat.
  node0.setResourceOption(ResourceOption.newInstance(Resources.createResource(1024, 4), RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT));
  assertEquals(1024, node0.getTotalCapability().getMemory());
  assertEquals(2048, schedulerNodes.get(node0.getNodeID()).getAvailableResource().getMemory());
  // After the heartbeat the scheduler picks up the new capacity.
  NodeUpdateSchedulerEvent node0Update = new NodeUpdateSchedulerEvent(node0);
  scheduler.handle(node0Update);
  assertEquals(1024, schedulerNodes.get(node0.getNodeID()).getAvailableResource().getMemory());
  QueueInfo queueInfo = scheduler.getQueueInfo(null, false, false);
  Assert.assertEquals(0.0f, queueInfo.getCurrentCapacity(), 0.0f);
  int _appId = 1;
  int _appAttemptId = 1;
  ApplicationAttemptId appAttemptId = createAppAttemptId(_appId, _appAttemptId);
  AppAddedSchedulerEvent appEvent = new AppAddedSchedulerEvent(appAttemptId.getApplicationId(), "queue1", "user1");
  scheduler.handle(appEvent);
  AppAttemptAddedSchedulerEvent attemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId, false);
  scheduler.handle(attemptEvent);
  int memory = 1024;
  int priority = 1;
  List ask = new ArrayList();
  ResourceRequest nodeLocal = createResourceRequest(memory, node0.getHostName(), priority, 1);
  ResourceRequest rackLocal = createResourceRequest(memory, node0.getRackName(), priority, 1);
  ResourceRequest any = createResourceRequest(memory, ResourceRequest.ANY, priority, 1);
  ask.add(nodeLocal);
  ask.add(rackLocal);
  ask.add(any);
  scheduler.allocate(appAttemptId, ask, new ArrayList(), null, null);
  Assert.assertEquals(1, nodeLocal.getNumContainers());
  // The heartbeat triggers the allocation; the outstanding ask drops to 0.
  scheduler.handle(node0Update);
  Assert.assertEquals(0, nodeLocal.getNumContainers());
  SchedulerAppReport info = scheduler.getSchedulerAppInfo(appAttemptId);
  Assert.assertEquals(1, info.getLiveContainers().size());
  // The 1024 MB container fills the resized 1024 MB node: capacity is 100%.
  queueInfo = scheduler.getQueueInfo(null, false, false);
  Assert.assertEquals(1.0f, queueInfo.getCurrentCapacity(), 0.0f);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * All asks that can be satisfied node-locally must be allocated on a single
 * node heartbeat, driving the outstanding node-local request count to zero
 * and producing one live container per requested container.
 */
@Test(timeout=2000) public void testNodeLocalAssignment() throws Exception {
AsyncDispatcher dispatcher=new InlineDispatcher();
Configuration conf=new Configuration();
RMContainerTokenSecretManager containerTokenSecretManager=new RMContainerTokenSecretManager(conf);
containerTokenSecretManager.rollMasterKey();
NMTokenSecretManagerInRM nmTokenSecretManager=new NMTokenSecretManagerInRM(conf);
nmTokenSecretManager.rollMasterKey();
RMApplicationHistoryWriter writer=mock(RMApplicationHistoryWriter.class);
RMContext rmContext=new RMContextImpl(dispatcher,null,null,null,null,null,containerTokenSecretManager,nmTokenSecretManager,null,writer);
FifoScheduler scheduler=new FifoScheduler();
scheduler.setRMContext(rmContext);
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(new Configuration(),rmContext);
// A single large node so all three containers fit node-locally.
RMNode node0=MockNodes.newNodeInfo(1,Resources.createResource(1024 * 64),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node0);
scheduler.handle(nodeEvent1);
int _appId=1;
int _appAttemptId=1;
ApplicationAttemptId appAttemptId=createAppAttemptId(_appId,_appAttemptId);
AppAddedSchedulerEvent appEvent=new AppAddedSchedulerEvent(appAttemptId.getApplicationId(),"queue1","user1");
scheduler.handle(appEvent);
AppAttemptAddedSchedulerEvent attemptEvent=new AppAttemptAddedSchedulerEvent(appAttemptId,false);
scheduler.handle(attemptEvent);
int memory=64;
int nConts=3;
int priority=20;
// Matching node-local, rack-local and ANY requests for nConts containers.
List ask=new ArrayList();
ResourceRequest nodeLocal=createResourceRequest(memory,node0.getHostName(),priority,nConts);
ResourceRequest rackLocal=createResourceRequest(memory,node0.getRackName(),priority,nConts);
ResourceRequest any=createResourceRequest(memory,ResourceRequest.ANY,priority,nConts);
ask.add(nodeLocal);
ask.add(rackLocal);
ask.add(any);
scheduler.allocate(appAttemptId,ask,new ArrayList(),null,null);
NodeUpdateSchedulerEvent node0Update=new NodeUpdateSchedulerEvent(node0);
Assert.assertEquals(3,nodeLocal.getNumContainers());
// One heartbeat satisfies all three asks node-locally.
scheduler.handle(node0Update);
Assert.assertEquals(0,nodeLocal.getNumContainers());
SchedulerAppReport info=scheduler.getSchedulerAppInfo(appAttemptId);
Assert.assertEquals(3,info.getLiveContainers().size());
scheduler.stop();
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Validate master-key-roll-over and that tokens are usable even after
 * master-key-roll-over: a token issued under the new key works, while the
 * token issued under the old key is rejected once the old key retires.
 * @throws Exception
 */
@Test public void testMasterKeyRollOver() throws Exception {
conf.setLong(YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,rolling_interval_sec);
conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS,am_expire_ms);
MyContainerManager containerManager=new MyContainerManager();
final MockRMWithAMS rm=new MockRMWithAMS(conf,containerManager);
rm.start();
Long startTime=System.currentTimeMillis();
// Shadows the field conf with the RM's effective configuration.
final Configuration conf=rm.getConfig();
final YarnRPC rpc=YarnRPC.create(conf);
ApplicationMasterProtocol rmClient=null;
AMRMTokenSecretManager appTokenSecretManager=rm.getRMContext().getAMRMTokenSecretManager();
MasterKeyData oldKey=appTokenSecretManager.getMasterKey();
Assert.assertNotNull(oldKey);
try {
MockNM nm1=rm.registerNode("localhost:1234",5120);
RMApp app=rm.submitApp(1024);
nm1.nodeHeartbeat(true);
// Bounded wait for the AM launch to deliver container tokens.
int waitCount=0;
while (containerManager.containerTokens == null && waitCount++ < maxWaitAttempts) {
LOG.info("Waiting for AM Launch to happen..");
Thread.sleep(1000);
}
Assert.assertNotNull(containerManager.containerTokens);
RMAppAttempt attempt=app.getCurrentAppAttempt();
ApplicationAttemptId applicationAttemptId=attempt.getAppAttemptId();
UserGroupInformation currentUser=UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
Credentials credentials=containerManager.getContainerCredentials();
final InetSocketAddress rmBindAddress=rm.getApplicationMasterService().getBindAddress();
Token extends TokenIdentifier> amRMToken=MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress,credentials.getAllTokens());
currentUser.addToken(amRMToken);
rmClient=createRMClient(rm,conf,rpc,currentUser);
RegisterApplicationMasterRequest request=Records.newRecord(RegisterApplicationMasterRequest.class);
rmClient.registerApplicationMaster(request);
AllocateRequest allocateRequest=Records.newRecord(AllocateRequest.class);
Assert.assertTrue(rmClient.allocate(allocateRequest).getAMCommand() == null);
// Keep allocating until the rolling interval elapses so a new key is cut.
while (System.currentTimeMillis() - startTime < rolling_interval_sec * 1000) {
rmClient.allocate(allocateRequest);
Thread.sleep(500);
}
MasterKeyData newKey=appTokenSecretManager.getMasterKey();
Assert.assertNotNull(newKey);
Assert.assertFalse("Master key should have changed!",oldKey.equals(newKey));
// The old token must still authenticate while the old key remains active.
rpc.stopProxy(rmClient,conf);
rmClient=createRMClient(rm,conf,rpc,currentUser);
Assert.assertTrue(rmClient.allocate(allocateRequest).getAMCommand() == null);
// Bounded wait for the new key to be activated (old key retired).
waitCount=0;
while (waitCount++ <= maxWaitAttempts) {
if (appTokenSecretManager.getCurrnetMasterKeyData() != oldKey) {
break;
}
try {
rmClient.allocate(allocateRequest);
}
catch ( Exception ex) {
break;
}
Thread.sleep(200);
}
Assert.assertTrue(appTokenSecretManager.getCurrnetMasterKeyData().equals(newKey));
Assert.assertTrue(appTokenSecretManager.getMasterKey().equals(newKey));
Assert.assertTrue(appTokenSecretManager.getNextMasterKeyData() == null);
// A token freshly issued under the new key must work.
Token newToken=appTokenSecretManager.createAndGetAMRMToken(applicationAttemptId);
SecurityUtil.setTokenService(newToken,rmBindAddress);
currentUser.addToken(newToken);
rpc.stopProxy(rmClient,conf);
rmClient=createRMClient(rm,conf,rpc,currentUser);
allocateRequest=Records.newRecord(AllocateRequest.class);
Assert.assertTrue(rmClient.allocate(allocateRequest).getAMCommand() == null);
rpc.stopProxy(rmClient,conf);
// The original AMRM token must be rejected once the old key is gone.
try {
currentUser.addToken(amRMToken);
rmClient=createRMClient(rm,conf,rpc,currentUser);
allocateRequest=Records.newRecord(AllocateRequest.class);
Assert.assertTrue(rmClient.allocate(allocateRequest).getAMCommand() == null);
Assert.fail("The old Token should not work");
}
catch ( Exception ex) {
}
}
finally {
rm.stop();
if (rmClient != null) {
rpc.stopProxy(rmClient,conf);
}
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * After a master-key roll, the AM must receive a re-issued AMRM token in
 * exactly one allocate response, and that token's key id must match the
 * secret manager's current master key. Subsequent allocates (including
 * after activating the next key) carry no token.
 */
@Test(timeout=20000) public void testAMRMMasterKeysUpdate() throws Exception {
  MockRM rm = new MockRM(conf) {
    @Override protected void doSecureLogin() throws IOException {
      // Skip secure login for this test.
    }
  };
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 8000);
  RMApp app = rm.submitApp(200);
  MockAM am = MockRM.launchAndRegisterAM(app, rm, nm);
  // No roll yet: the response carries no token.
  AllocateResponse response = am.allocate(Records.newRecord(AllocateRequest.class));
  Assert.assertNull(response.getAMRMToken());
  rm.getRMContext().getAMRMTokenSecretManager().rollMasterKey();
  // First allocate after the roll carries the re-issued token.
  response = am.allocate(Records.newRecord(AllocateRequest.class));
  Assert.assertNotNull(response.getAMRMToken());
  Token amrmToken = ConverterUtils.convertFromYarn(response.getAMRMToken(), new Text(response.getAMRMToken().getService()));
  // JUnit convention: expected value first (arguments were reversed before).
  Assert.assertEquals(rm.getRMContext().getAMRMTokenSecretManager().getMasterKey().getMasterKey().getKeyId(), amrmToken.decodeIdentifier().getKeyId());
  // The token is handed out only once per roll.
  response = am.allocate(Records.newRecord(AllocateRequest.class));
  Assert.assertNull(response.getAMRMToken());
  rm.getRMContext().getAMRMTokenSecretManager().activateNextMasterKey();
  response = am.allocate(Records.newRecord(AllocateRequest.class));
  Assert.assertNull(response.getAMRMToken());
  rm.stop();
}
APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Submitting an app with an already-cancelled delegation token must surface
 * an APP_REJECTED event for that application via the async token-renewal
 * path; the test fails if no such event appears within the polling window.
 */
@Test(timeout=60000) public void testAppRejectionWithCancelledDelegationToken() throws Exception {
MyFS dfs=(MyFS)FileSystem.get(conf);
LOG.info("dfs=" + (Object)dfs.hashCode() + ";conf="+ conf.hashCode());
MyToken token=dfs.getDelegationToken("user1");
// Cancel before registration so renewal must fail and reject the app.
token.cancelToken();
Credentials ts=new Credentials();
ts.addToken(token.getKind(),token);
ApplicationId appId=BuilderUtils.newApplicationId(0,0);
delegationTokenRenewer.addApplicationAsync(appId,ts,true);
// Poll the event queue for up to ~10s (20 x 500ms) for APP_REJECTED.
int waitCnt=20;
while (waitCnt-- > 0) {
if (!eventQueue.isEmpty()) {
Event evt=eventQueue.take();
if (evt.getType() == RMAppEventType.APP_REJECTED) {
Assert.assertTrue(((RMAppEvent)evt).getApplicationId().equals(appId));
return;
}
}
else {
Thread.sleep(500);
}
}
fail("App submission with a cancelled token should have failed");
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Basic idea of the test:
 * 1. create tokens.
 * 2. Mark one of them to be renewed in 2 seconds (instead of
 * 24 hours)
 * 3. register them for renewal
 * 4. sleep for 3 seconds
 * 5. count number of renewals (should 3 initial ones + one extra)
 * 6. register another token for 2 seconds
 * 7. cancel it immediately
 * 8. Sleep and check that the 2 seconds renew didn't happen
 * (totally 5 renewals)
 * 9. check cancellation
 * @throws IOException
 * @throws URISyntaxException
 */
@Test(timeout=60000) public void testDTRenewal() throws Exception {
MyFS dfs=(MyFS)FileSystem.get(conf);
LOG.info("dfs=" + (Object)dfs.hashCode() + ";conf="+ conf.hashCode());
MyToken token1, token2, token3;
token1=dfs.getDelegationToken("user1");
token2=dfs.getDelegationToken("user2");
token3=dfs.getDelegationToken("user3");
// token1 is configured to be renewed every 2 seconds (instead of 24h).
Renewer.tokenToRenewIn2Sec=token1;
LOG.info("token=" + token1 + " should be renewed for 2 secs");
String nn1=DelegationTokenRenewer.SCHEME + "://host1:0";
String nn2=DelegationTokenRenewer.SCHEME + "://host2:0";
String nn3=DelegationTokenRenewer.SCHEME + "://host3:0";
Credentials ts=new Credentials();
ts.addToken(new Text(nn1),token1);
ts.addToken(new Text(nn2),token2);
ts.addToken(new Text(nn3),token3);
ApplicationId applicationId_0=BuilderUtils.newApplicationId(0,0);
delegationTokenRenewer.addApplicationAsync(applicationId_0,ts,true);
waitForEventsToGetProcessed(delegationTokenRenewer);
// 3 initial renewals plus the one extra 2-second renewal of token1.
int numberOfExpectedRenewals=3 + 1;
int attempts=10;
while (attempts-- > 0) {
try {
Thread.sleep(3 * 1000);
}
catch ( InterruptedException e) {
}
if (Renewer.counter == numberOfExpectedRenewals) break;
}
LOG.info("dfs=" + dfs.hashCode() + ";Counter = "+ Renewer.counter+ ";t="+ Renewer.lastRenewed);
assertEquals("renew wasn't called as many times as expected(4):",numberOfExpectedRenewals,Renewer.counter);
assertEquals("most recently renewed token mismatch",Renewer.lastRenewed,token1);
// Register token4 for 2-second renewal, then finish the app immediately:
// the pending renewal must be cancelled and never fire.
ts=new Credentials();
MyToken token4=dfs.getDelegationToken("user4");
Renewer.tokenToRenewIn2Sec=token4;
LOG.info("token=" + token4 + " should be renewed for 2 secs");
String nn4=DelegationTokenRenewer.SCHEME + "://host4:0";
ts.addToken(new Text(nn4),token4);
ApplicationId applicationId_1=BuilderUtils.newApplicationId(0,1);
delegationTokenRenewer.addApplicationAsync(applicationId_1,ts,true);
waitForEventsToGetProcessed(delegationTokenRenewer);
delegationTokenRenewer.applicationFinished(applicationId_1);
waitForEventsToGetProcessed(delegationTokenRenewer);
numberOfExpectedRenewals=Renewer.counter;
try {
Thread.sleep(6 * 1000);
}
catch ( InterruptedException e) {
}
LOG.info("Counter = " + Renewer.counter + ";t="+ Renewer.lastRenewed);
assertEquals("renew wasn't called as many times as expected",numberOfExpectedRenewals,Renewer.counter);
// token4 was cancelled on app finish: renewing it must fail.
try {
token4.renew(conf);
fail("Renewal of cancelled token should have failed");
}
catch ( InvalidToken ite) {
}
}
APIUtilityVerifier BranchVerifier UtilityVerifier EqualityVerifier HybridVerifier
/**
 * Basic idea of the test:
 * 0. Setup token KEEP_ALIVE
 * 1. create tokens.
 * 2. register them for renewal - to be cancelled on app complete
 * 3. Complete app.
 * 4. Verify token is alive within the KEEP_ALIVE time
 * 5. Verify token has been cancelled after the KEEP_ALIVE_TIME
 * @throws IOException
 * @throws URISyntaxException
 */
@Test(timeout=60000) public void testDTKeepAlive1() throws Exception {
  Configuration lconf = new Configuration(conf);
  lconf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
  // Short NM-expiry / removal intervals so the keep-alive window elapses
  // within the test's sleep below.
  lconf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 6000l);
  lconf.setLong(YarnConfiguration.RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS, 1000l);
  DelegationTokenRenewer localDtr = createNewDelegationTokenRenewer(lconf, counter);
  RMContext mockContext = mock(RMContext.class);
  ClientRMService mockClientRMService = mock(ClientRMService.class);
  when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
  when(mockContext.getDelegationTokenRenewer()).thenReturn(localDtr);
  when(mockContext.getDispatcher()).thenReturn(dispatcher);
  InetSocketAddress sockAddr = InetSocketAddress.createUnresolved("localhost", 1234);
  when(mockClientRMService.getBindAddress()).thenReturn(sockAddr);
  localDtr.setRMContext(mockContext);
  localDtr.init(lconf);
  localDtr.start();
  MyFS dfs = (MyFS) FileSystem.get(lconf);
  LOG.info("dfs=" + (Object) dfs.hashCode() + ";conf="+ lconf.hashCode());
  Credentials ts = new Credentials();
  MyToken token1 = dfs.getDelegationToken("user1");
  String nn1 = DelegationTokenRenewer.SCHEME + "://host1:0";
  ts.addToken(new Text(nn1), token1);
  ApplicationId applicationId_0 = BuilderUtils.newApplicationId(0, 0);
  localDtr.addApplicationAsync(applicationId_0, ts, true);
  waitForEventsToGetProcessed(localDtr);
  if (!eventQueue.isEmpty()) {
    Event evt = eventQueue.take();
    if (evt instanceof RMAppEvent) {
      // JUnit convention: expected value first (arguments were reversed before).
      Assert.assertEquals(RMAppEventType.START, ((RMAppEvent) evt).getType());
    }
    else {
      fail("RMAppEvent.START was expected!!");
    }
  }
  localDtr.applicationFinished(applicationId_0);
  waitForEventsToGetProcessed(localDtr);
  // Within the keep-alive window the token must still be renewable.
  token1.renew(lconf);
  // After the keep-alive window elapses the token must have been cancelled.
  Thread.sleep(10000l);
  try {
    token1.renew(lconf);
    fail("Renewal of cancelled token should have failed");
  }
  catch ( InvalidToken ite) {
    // expected: token was cancelled after keep-alive expiry
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Queries the RM "apps" REST endpoint filtered by application state: first
// with a single "states" value, then with two repeated "states" params,
// and checks the filtered result sets.
@Test public void testAppsQueryStates() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
// One app stays ACCEPTED; a second is killed so both states exist.
rm.submitApp(CONTAINER_MB);
RMApp killedApp=rm.submitApp(CONTAINER_MB);
rm.killApp(killedApp.getApplicationId());
amNodeManager.nodeHeartbeat(true);
WebResource r=resource();
MultivaluedMapImpl params=new MultivaluedMapImpl();
params.add("states",YarnApplicationState.ACCEPTED.toString());
ClientResponse response=r.path("ws").path("v1").path("cluster").path("apps").queryParams(params).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
JSONArray array=apps.getJSONArray("app");
// Only the non-killed app matches the ACCEPTED filter.
assertEquals("incorrect number of elements",1,array.length());
assertEquals("state not equal to ACCEPTED","ACCEPTED",array.getJSONObject(0).getString("state"));
// Second query: two "states" values supplied as repeated query params.
r=resource();
params=new MultivaluedMapImpl();
params.add("states",YarnApplicationState.ACCEPTED.toString());
params.add("states",YarnApplicationState.KILLED.toString());
response=r.path("ws").path("v1").path("cluster").path("apps").queryParams(params).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
// Both apps match; response ordering is not asserted, so accept either.
assertEquals("incorrect number of elements",2,array.length());
assertTrue("both app states of ACCEPTED and KILLED are not present",(array.getJSONObject(0).getString("state").equals("ACCEPTED") && array.getJSONObject(1).getString("state").equals("KILLED")) || (array.getJSONObject(0).getString("state").equals("KILLED") && array.getJSONObject(1).getString("state").equals("ACCEPTED")));
rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// All apps are submitted strictly after 'cutoff', so a query bounded by
// startedTimeEnd=cutoff must match nothing and "apps" comes back as null.
@Test public void testAppsQueryStartEnd() throws JSONException, Exception {
  rm.start();
  rm.registerNode("127.0.0.1:1234",2048);
  long cutoff=System.currentTimeMillis();
  Thread.sleep(1);
  for (int i=0; i < 3; i++) {
    rm.submitApp(CONTAINER_MB);
  }
  ClientResponse reply=resource().path("ws").path("v1").path("cluster").path("apps").queryParam("startedTimeEnd",String.valueOf(cutoff)).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,reply.getType());
  JSONObject body=reply.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,body.length());
  assertEquals("apps is not null",JSONObject.NULL,body.get("apps"));
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Every app starts after 'threshold', so startedTimeBegin=threshold must
// return all three submitted apps.
@Test public void testAppsQueryStartBegin() throws JSONException, Exception {
  rm.start();
  long threshold=System.currentTimeMillis();
  Thread.sleep(1);
  rm.registerNode("127.0.0.1:1234",2048);
  for (int i=0; i < 3; i++) {
    rm.submitApp(CONTAINER_MB);
  }
  ClientResponse reply=resource().path("ws").path("v1").path("cluster").path("apps").queryParam("startedTimeBegin",String.valueOf(threshold)).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,reply.getType());
  JSONObject body=reply.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,body.length());
  JSONObject appsNode=body.getJSONObject("apps");
  assertEquals("incorrect number of elements",1,appsNode.length());
  JSONArray appList=appsNode.getJSONArray("app");
  assertEquals("incorrect number of elements",3,appList.length());
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Submits two apps and checks the XML listing holds a single <apps>
// wrapper containing two <app> children.
@Test public void testAppsXMLMulti() throws JSONException, Exception {
  rm.start();
  MockNM nm=rm.registerNode("127.0.0.1:1234",2048);
  rm.submitApp(CONTAINER_MB,"testwordcount","user1");
  rm.submitApp(2048,"testwordcount2","user1");
  nm.nodeHeartbeat(true);
  ClientResponse reply=resource().path("ws").path("v1").path("cluster").path("apps").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE,reply.getType());
  String payload=reply.getEntity(String.class);
  DocumentBuilder parser=DocumentBuilderFactory.newInstance().newDocumentBuilder();
  Document dom=parser.parse(new InputSource(new StringReader(payload)));
  assertEquals("incorrect number of elements",1,dom.getElementsByTagName("apps").getLength());
  assertEquals("incorrect number of elements",2,dom.getElementsByTagName("app").getLength());
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Filters the apps listing by finishedTimeBegin: only the one app that is
// driven to completion should have a finish time at or after 'start'.
@Test public void testAppsQueryFinishBegin() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
// Timestamp taken before any app can possibly finish.
long start=System.currentTimeMillis();
Thread.sleep(1);
// Drive one app through its full lifecycle so it actually finishes:
// launch AM, register, unregister, then report its container COMPLETE.
RMApp app1=rm.submitApp(CONTAINER_MB);
amNodeManager.nodeHeartbeat(true);
MockAM am=rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
am.registerAppAttempt();
am.unregisterAppAttempt();
amNodeManager.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(),1,ContainerState.COMPLETE);
// Two more apps are submitted but never finish.
rm.submitApp(CONTAINER_MB);
rm.submitApp(CONTAINER_MB);
WebResource r=resource();
ClientResponse response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("finishedTimeBegin",String.valueOf(start)).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
JSONArray array=apps.getJSONArray("app");
// Only the completed app matches the finished-time lower bound.
assertEquals("incorrect number of elements",1,array.length());
rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Fetches a single application by id in XML form and verifies its fields
// via the shared verifyAppsXML helper.
@Test public void testSingleAppsXML() throws JSONException, Exception {
  rm.start();
  MockNM nm=rm.registerNode("127.0.0.1:1234",2048);
  RMApp submitted=rm.submitApp(CONTAINER_MB,"testwordcount","user1");
  nm.nodeHeartbeat(true);
  String appId=submitted.getApplicationId().toString();
  ClientResponse reply=resource().path("ws").path("v1").path("cluster").path("apps").path(appId).accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE,reply.getType());
  String payload=reply.getEntity(String.class);
  DocumentBuilder parser=DocumentBuilderFactory.newInstance().newDocumentBuilder();
  Document dom=parser.parse(new InputSource(new StringReader(payload)));
  NodeList appNodes=dom.getElementsByTagName("app");
  assertEquals("incorrect number of elements",1,appNodes.getLength());
  verifyAppsXML(appNodes,submitted);
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Filters the apps listing by finishedTimeEnd taken after one app has
// completed; all three submitted apps are expected in the result.
@Test public void testAppsQueryFinishEnd() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
// Drive the first app through its full lifecycle so it finishes.
RMApp app1=rm.submitApp(CONTAINER_MB);
amNodeManager.nodeHeartbeat(true);
MockAM am=rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
am.registerAppAttempt();
am.unregisterAppAttempt();
amNodeManager.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(),1,ContainerState.COMPLETE);
// Two more apps remain unfinished when 'end' is captured.
rm.submitApp(CONTAINER_MB);
rm.submitApp(CONTAINER_MB);
long end=System.currentTimeMillis();
WebResource r=resource();
ClientResponse response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("finishedTimeEnd",String.valueOf(end)).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
JSONArray array=apps.getJSONArray("app");
// All 3 apps match — NOTE(review): presumably unfinished apps report a
// finish time of 0, which satisfies finishedTimeEnd; confirm against the
// RMWebServices filter implementation.
assertEquals("incorrect number of elements",3,array.length());
rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Two apps start inside the [windowStart, windowEnd] interval and one
// starts after it; the windowed query must return exactly the first two.
@Test public void testAppsQueryStartBeginEnd() throws JSONException, Exception {
  rm.start();
  rm.registerNode("127.0.0.1:1234",2048);
  long windowStart=System.currentTimeMillis();
  Thread.sleep(1);
  rm.submitApp(CONTAINER_MB);
  rm.submitApp(CONTAINER_MB);
  long windowEnd=System.currentTimeMillis();
  Thread.sleep(1);
  rm.submitApp(CONTAINER_MB);
  ClientResponse reply=resource().path("ws").path("v1").path("cluster").path("apps").queryParam("startedTimeBegin",String.valueOf(windowStart)).queryParam("startedTimeEnd",String.valueOf(windowEnd)).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,reply.getType());
  JSONObject body=reply.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,body.length());
  JSONObject appsNode=body.getJSONObject("apps");
  assertEquals("incorrect number of elements",1,appsNode.length());
  JSONArray appList=appsNode.getJSONArray("app");
  assertEquals("incorrect number of elements",2,appList.length());
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Filters the apps listing by a [finishedTimeBegin, finishedTimeEnd]
// window that contains exactly one completed app.
@Test public void testAppsQueryFinishBeginEnd() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
// Window lower bound, taken before anything finishes.
long start=System.currentTimeMillis();
Thread.sleep(1);
// Drive one app through its full lifecycle so it finishes in-window.
RMApp app1=rm.submitApp(CONTAINER_MB);
amNodeManager.nodeHeartbeat(true);
MockAM am=rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
am.registerAppAttempt();
am.unregisterAppAttempt();
amNodeManager.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(),1,ContainerState.COMPLETE);
// These two never finish, so they fall outside the finished-time window.
rm.submitApp(CONTAINER_MB);
rm.submitApp(CONTAINER_MB);
// Window upper bound, taken after the first app completed.
long end=System.currentTimeMillis();
WebResource r=resource();
ClientResponse response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("finishedTimeBegin",String.valueOf(start)).queryParam("finishedTimeEnd",String.valueOf(end)).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
JSONArray array=apps.getJSONArray("app");
// Only the completed app finished within [start, end].
assertEquals("incorrect number of elements",1,array.length());
rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Fetches the attempt list of a single app as XML and validates its one
// attempt via the shared verifyAppAttemptsXML helper.
@Test public void testAppAttemptsXML() throws JSONException, Exception {
  rm.start();
  String owner="user1";
  MockNM nm=rm.registerNode("127.0.0.1:1234",2048);
  RMApp submitted=rm.submitApp(CONTAINER_MB,"testwordcount",owner);
  nm.nodeHeartbeat(true);
  ClientResponse reply=resource().path("ws").path("v1").path("cluster").path("apps").path(submitted.getApplicationId().toString()).path("appattempts").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE,reply.getType());
  String payload=reply.getEntity(String.class);
  DocumentBuilder parser=DocumentBuilderFactory.newInstance().newDocumentBuilder();
  Document dom=parser.parse(new InputSource(new StringReader(payload)));
  assertEquals("incorrect number of elements",1,dom.getElementsByTagName("appAttempts").getLength());
  NodeList attemptNodes=dom.getElementsByTagName("appAttempt");
  assertEquals("incorrect number of elements",1,attemptNodes.getLength());
  verifyAppAttemptsXML(attemptNodes,submitted.getCurrentAppAttempt(),owner);
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Like testAppsQueryStates, but the second query passes both states as a
// single comma-separated "states" value instead of repeated params.
@Test public void testAppsQueryStatesComma() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
// One app stays ACCEPTED; a second is killed so both states exist.
rm.submitApp(CONTAINER_MB);
RMApp killedApp=rm.submitApp(CONTAINER_MB);
rm.killApp(killedApp.getApplicationId());
amNodeManager.nodeHeartbeat(true);
WebResource r=resource();
MultivaluedMapImpl params=new MultivaluedMapImpl();
params.add("states",YarnApplicationState.ACCEPTED.toString());
ClientResponse response=r.path("ws").path("v1").path("cluster").path("apps").queryParams(params).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
JSONArray array=apps.getJSONArray("app");
// Only the non-killed app matches the ACCEPTED filter.
assertEquals("incorrect number of elements",1,array.length());
assertEquals("state not equal to ACCEPTED","ACCEPTED",array.getJSONObject(0).getString("state"));
// Second query: both states in one comma-separated value.
r=resource();
params=new MultivaluedMapImpl();
params.add("states",YarnApplicationState.ACCEPTED.toString() + "," + YarnApplicationState.KILLED.toString());
response=r.path("ws").path("v1").path("cluster").path("apps").queryParams(params).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
// Both apps match; ordering is unspecified, so accept either order.
assertEquals("incorrect number of elements",2,array.length());
assertTrue("both app states of ACCEPTED and KILLED are not present",(array.getJSONObject(0).getString("state").equals("ACCEPTED") && array.getJSONObject(1).getString("state").equals("KILLED")) || (array.getJSONObject(0).getString("state").equals("KILLED") && array.getJSONObject(1).getString("state").equals("ACCEPTED")));
rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Submits one app and checks the XML apps listing: one <apps> wrapper,
// one <app> child, whose content is validated by verifyAppsXML.
@Test public void testAppsXML() throws JSONException, Exception {
  rm.start();
  MockNM nm=rm.registerNode("127.0.0.1:1234",2048);
  RMApp submitted=rm.submitApp(CONTAINER_MB,"testwordcount","user1");
  nm.nodeHeartbeat(true);
  ClientResponse reply=resource().path("ws").path("v1").path("cluster").path("apps").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE,reply.getType());
  String payload=reply.getEntity(String.class);
  DocumentBuilder parser=DocumentBuilderFactory.newInstance().newDocumentBuilder();
  Document dom=parser.parse(new InputSource(new StringReader(payload)));
  assertEquals("incorrect number of elements",1,dom.getElementsByTagName("apps").getLength());
  NodeList appNodes=dom.getElementsByTagName("app");
  assertEquals("incorrect number of elements",1,appNodes.getLength());
  verifyAppsXML(appNodes,submitted);
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Two apps start before 'threshold' and one after; only the later one
// should satisfy startedTimeBegin=threshold.
@Test public void testAppsQueryStartBeginSome() throws JSONException, Exception {
  rm.start();
  rm.registerNode("127.0.0.1:1234",2048);
  rm.submitApp(CONTAINER_MB);
  rm.submitApp(CONTAINER_MB);
  long threshold=System.currentTimeMillis();
  Thread.sleep(1);
  rm.submitApp(CONTAINER_MB);
  ClientResponse reply=resource().path("ws").path("v1").path("cluster").path("apps").queryParam("startedTimeBegin",String.valueOf(threshold)).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,reply.getType());
  JSONObject body=reply.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,body.length());
  JSONObject appsNode=body.getJSONObject("apps");
  assertEquals("incorrect number of elements",1,appsNode.length());
  JSONArray appList=appsNode.getJSONArray("app");
  assertEquals("incorrect number of elements",1,appList.length());
  rm.stop();
}
APIUtilityVerifier BranchVerifier InternalCallVerifier EqualityVerifier
// PUTting an unsupported target state onto the app /state resource must be
// rejected with 400 BAD_REQUEST, for every combination of accept media
// type, request content type, and invalid target state. When auth is
// disabled the request is rejected earlier with 401 UNAUTHORIZED.
@Test public void testSingleAppKillInvalidState() throws Exception {
  rm.start();
  MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
  String[] mediaTypes={MediaType.APPLICATION_JSON,MediaType.APPLICATION_XML};
  MediaType[] contentTypes={MediaType.APPLICATION_JSON_TYPE,MediaType.APPLICATION_XML_TYPE};
  // FINISHED is a real state but not a legal kill target; "blah" is not a
  // state at all.
  String[] targetStates={YarnApplicationState.FINISHED.toString(),"blah"};
  for ( String mediaType : mediaTypes) {
    for ( MediaType contentType : contentTypes) {
      for ( String targetStateString : targetStates) {
        RMApp app=rm.submitApp(CONTAINER_MB,"",webserviceUserName);
        amNodeManager.nodeHeartbeat(true);
        AppState targetState=new AppState(targetStateString);
        // JSON bodies are serialized explicitly; XML bodies are marshalled
        // from the AppState object by the client.
        Object entity;
        if (contentType == MediaType.APPLICATION_JSON_TYPE) {
          entity=appStateToJSON(targetState);
        }
        else {
          entity=targetState;
        }
        ClientResponse response=this.constructWebResource("apps",app.getApplicationId().toString(),"state").entity(entity,contentType).accept(mediaType).put(ClientResponse.class);
        if (!isAuthenticationEnabled()) {
          assertEquals(Status.UNAUTHORIZED,response.getClientResponseStatus());
          continue;
        }
        assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
      }
    }
  }
  rm.stop();
  // (redundant trailing "return;" removed)
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// End-to-end kill via the REST API: PUT state=KILLED, expect 202 ACCEPTED
// with a Location header, then poll by re-PUTting until the server reports
// 200 OK and the app is KILLED. Exercised for JSON and XML bodies.
@Test(timeout=90000) public void testSingleAppKill() throws Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
String[] mediaTypes={MediaType.APPLICATION_JSON,MediaType.APPLICATION_XML};
MediaType[] contentTypes={MediaType.APPLICATION_JSON_TYPE,MediaType.APPLICATION_XML_TYPE};
for ( String mediaType : mediaTypes) {
for ( MediaType contentType : contentTypes) {
// Fresh app per combination so each kill starts from ACCEPTED.
RMApp app=rm.submitApp(CONTAINER_MB,"",webserviceUserName);
amNodeManager.nodeHeartbeat(true);
ClientResponse response=this.constructWebResource("apps",app.getApplicationId().toString(),"state").accept(mediaType).get(ClientResponse.class);
AppState targetState=new AppState(YarnApplicationState.KILLED.toString());
// JSON bodies are serialized explicitly; XML is marshalled from the object.
Object entity;
if (contentType == MediaType.APPLICATION_JSON_TYPE) {
entity=appStateToJSON(targetState);
}
else {
entity=targetState;
}
response=this.constructWebResource("apps",app.getApplicationId().toString(),"state").entity(entity,contentType).accept(mediaType).put(ClientResponse.class);
if (!isAuthenticationEnabled()) {
// Without auth the kill is rejected before any state change.
assertEquals(Status.UNAUTHORIZED,response.getClientResponseStatus());
continue;
}
// Kill is asynchronous: first response is 202 with KILLING/ACCEPTED state.
assertEquals(Status.ACCEPTED,response.getClientResponseStatus());
if (mediaType == MediaType.APPLICATION_JSON) {
verifyAppStateJson(response,RMAppState.KILLING,RMAppState.ACCEPTED);
}
else {
verifyAppStateXML(response,RMAppState.KILLING,RMAppState.ACCEPTED);
}
// The Location header must point back at this app's /state resource
// and be fetchable with a plain client.
String locationHeaderValue=response.getHeaders().getFirst(HttpHeaders.LOCATION);
Client c=Client.create();
WebResource tmp=c.resource(locationHeaderValue);
if (isAuthenticationEnabled()) {
tmp=tmp.queryParam("user.name",webserviceUserName);
}
response=tmp.get(ClientResponse.class);
assertEquals(Status.OK,response.getClientResponseStatus());
assertTrue(locationHeaderValue.endsWith("/ws/v1/cluster/apps/" + app.getApplicationId().toString() + "/state"));
// Poll by repeating the PUT: 202 while the kill is in progress, 200
// once the app is KILLED. The method-level timeout bounds this loop.
while (true) {
Thread.sleep(100);
response=this.constructWebResource("apps",app.getApplicationId().toString(),"state").accept(mediaType).entity(entity,contentType).put(ClientResponse.class);
assertTrue((response.getClientResponseStatus() == Status.ACCEPTED) || (response.getClientResponseStatus() == Status.OK));
if (response.getClientResponseStatus() == Status.OK) {
assertEquals(RMAppState.KILLED,app.getState());
if (mediaType == MediaType.APPLICATION_JSON) {
verifyAppStateJson(response,RMAppState.KILLED);
}
else {
verifyAppStateXML(response,RMAppState.KILLED);
}
break;
}
}
}
}
rm.stop();
return;
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// GETs the /state sub-resource of a freshly submitted app in both JSON
// and XML and expects the ACCEPTED state in each representation.
@Test public void testSingleAppState() throws Exception {
  rm.start();
  MockNM nm=rm.registerNode("127.0.0.1:1234",2048);
  String[] acceptTypes={MediaType.APPLICATION_JSON,MediaType.APPLICATION_XML};
  for ( String acceptType : acceptTypes) {
    RMApp submitted=rm.submitApp(CONTAINER_MB,"",webserviceUserName);
    nm.nodeHeartbeat(true);
    ClientResponse reply=this.constructWebResource("apps",submitted.getApplicationId().toString(),"state").accept(acceptType).get(ClientResponse.class);
    assertEquals(Status.OK,reply.getClientResponseStatus());
    if (acceptType == MediaType.APPLICATION_JSON) {
      verifyAppStateJson(reply,RMAppState.ACCEPTED);
    }
    else if (acceptType == MediaType.APPLICATION_XML) {
      verifyAppStateXML(reply,RMAppState.ACCEPTED);
    }
  }
  rm.stop();
}
APIUtilityVerifier BranchVerifier InternalCallVerifier EqualityVerifier
// Kill requests against an unknown-but-well-formed application id and
// against a malformed id must both return 404 NOT_FOUND (or 401 when
// authentication is disabled, since auth is checked first).
@Test public void testSingleAppKillInvalidId() throws Exception {
  rm.start();
  MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
  amNodeManager.nodeHeartbeat(true);
  String[] testAppIds={"application_1391705042196_0001","random_string"};
  for ( String testAppId : testAppIds) {
    AppState info=new AppState("KILLED");
    ClientResponse response=this.constructWebResource("apps",testAppId,"state").accept(MediaType.APPLICATION_XML).entity(info,MediaType.APPLICATION_XML).put(ClientResponse.class);
    if (!isAuthenticationEnabled()) {
      assertEquals(Status.UNAUTHORIZED,response.getClientResponseStatus());
      continue;
    }
    assertEquals(Status.NOT_FOUND,response.getClientResponseStatus());
  }
  rm.stop();
  // (redundant trailing "return;" removed)
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Malformed request bodies posted to the app-submission endpoint must be
// rejected with 400 BAD_REQUEST for both XML and JSON content types.
@Test public void testAppSubmitBadJsonAndXML() throws Exception {
  String urlPath="apps";
  rm.start();
  MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
  amNodeManager.nodeHeartbeat(true);
  // NOTE(review): this well-formed submission context is constructed but
  // never posted — only the bad bodies below are sent. Confirm whether it
  // can be removed or was meant to be used in an additional request.
  ApplicationSubmissionContextInfo appInfo=new ApplicationSubmissionContextInfo();
  appInfo.setApplicationName("test");
  appInfo.setPriority(3);
  appInfo.setMaxAppAttempts(2);
  appInfo.setQueue("testqueue");
  appInfo.setApplicationType("test-type");
  // Fix: parameterized map instead of the raw HashMap type.
  HashMap<String,LocalResourceInfo> lr=new HashMap<String,LocalResourceInfo>();
  LocalResourceInfo y=new LocalResourceInfo();
  y.setUrl(new URI("http://www.test.com/file.txt"));
  y.setSize(100);
  y.setTimestamp(System.currentTimeMillis());
  y.setType(LocalResourceType.FILE);
  y.setVisibility(LocalResourceVisibility.APPLICATION);
  lr.put("example",y);
  appInfo.getContainerLaunchContextInfo().setResources(lr);
  appInfo.getResource().setMemory(1024);
  appInfo.getResource().setvCores(1);
  // Whitespace-only body declared as XML → 400.
  String body=" ";
  ClientResponse response=this.constructWebResource(urlPath).accept(MediaType.APPLICATION_XML).entity(body,MediaType.APPLICATION_XML).post(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
  // Well-formed JSON that does not match the expected schema → 400.
  body="{\"a\" : \"b\"}";
  response=this.constructWebResource(urlPath).accept(MediaType.APPLICATION_XML).entity(body,MediaType.APPLICATION_JSON).post(ClientResponse.class);
  validateResponseStatus(response,Status.BAD_REQUEST);
  rm.stop();
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies the per-user resource usage reported by the scheduler JSON
// endpoint for leaf queue b1 after two users submit one app each.
@Test public void testPerUserResourcesJSON() throws Exception {
rm.start();
try {
// Two users submit to the same leaf queue "b1".
rm.submitApp(10,"app1","user1",null,"b1");
rm.submitApp(20,"app2","user2",null,"b1");
WebResource r=resource();
ClientResponse response=r.path("ws").path("v1").path("cluster").path("scheduler/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
// Navigate scheduler -> schedulerInfo -> queue "b" -> leaf queue "b1".
JSONObject schedulerInfo=json.getJSONObject("scheduler").getJSONObject("schedulerInfo");
JSONObject b1=getSubQueue(getSubQueue(schedulerInfo,"b"),"b1");
JSONArray users=b1.getJSONObject("users").getJSONArray("user");
// Exactly the two submitting users are expected, in either order.
for (int i=0; i < 2; ++i) {
JSONObject user=users.getJSONObject(i);
assertTrue("User isn't user1 or user2",user.getString("username").equals("user1") || user.getString("username").equals("user2"));
// getInt throws JSONException if the numeric fields are missing,
// so these calls double as presence checks.
user.getInt("numActiveApplications");
user.getInt("numPendingApplications");
checkResourcesUsed(user);
}
}
finally {
// Always shut down the RM even if an assertion above fails.
rm.stop();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// The scheduler endpoint must return exactly one <scheduler> element
// wrapping one <schedulerInfo> element, validated by the shared helper.
@Test public void testClusterSchedulerXML() throws JSONException, Exception {
  ClientResponse reply=resource().path("ws").path("v1").path("cluster").path("scheduler/").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE,reply.getType());
  String payload=reply.getEntity(String.class);
  DocumentBuilder parser=DocumentBuilderFactory.newInstance().newDocumentBuilder();
  Document dom=parser.parse(new InputSource(new StringReader(payload)));
  assertEquals("incorrect number of elements",1,dom.getElementsByTagName("scheduler").getLength());
  NodeList schedulerInfoNodes=dom.getElementsByTagName("schedulerInfo");
  assertEquals("incorrect number of elements",1,schedulerInfoNodes.getLength());
  verifyClusterSchedulerXML(schedulerInfoNodes);
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test per user resources and resourcesUsed elements in the web services XML
 * response: queue b1 must list exactly its two submitting users (each with
 * parseable usage counters), every other queue's users element must be
 * empty, and every resourcesUsed element must carry numeric memory/vCores.
 * @throws Exception
 */
@Test public void testPerUserResourcesXML() throws Exception {
rm.start();
try {
// Two users submit to the same leaf queue "b1".
rm.submitApp(10,"app1","user1",null,"b1");
rm.submitApp(20,"app2","user2",null,"b1");
WebResource r=resource();
ClientResponse response=r.path("ws/v1/cluster/scheduler").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
String xml=response.getEntity(String.class);
DocumentBuilder db=DocumentBuilderFactory.newInstance().newDocumentBuilder();
InputSource is=new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom=db.parse(is);
// Walk every <users> element; its parent queue node carries the
// <queueName> used to decide what the element should contain.
NodeList allUsers=dom.getElementsByTagName("users");
for (int i=0; i < allUsers.getLength(); ++i) {
Node perUserResources=allUsers.item(i);
String queueName=getChildNodeByName(perUserResources.getParentNode(),"queueName").getTextContent();
if (queueName.equals("b1")) {
// Only queue b1 received apps, so only it lists (two) users.
assertEquals(2,perUserResources.getChildNodes().getLength());
NodeList users=perUserResources.getChildNodes();
for (int j=0; j < users.getLength(); ++j) {
Node user=users.item(j);
String username=getChildNodeByName(user,"username").getTextContent();
assertTrue(username.equals("user1") || username.equals("user2"));
// parseInt doubles as a presence-and-format check for each counter;
// it throws NumberFormatException if the field is missing/non-numeric.
Integer.parseInt(getChildNodeByName(getChildNodeByName(user,"resourcesUsed"),"memory").getTextContent());
Integer.parseInt(getChildNodeByName(user,"numActiveApplications").getTextContent());
Integer.parseInt(getChildNodeByName(user,"numPendingApplications").getTextContent());
}
}
else {
// Queues with no submitted apps must report an empty users element.
assertEquals(0,perUserResources.getChildNodes().getLength());
}
}
// Every resourcesUsed element in the document must have numeric
// memory and vCores children.
NodeList allResourcesUsed=dom.getElementsByTagName("resourcesUsed");
for (int i=0; i < allResourcesUsed.getLength(); ++i) {
Node resourcesUsed=allResourcesUsed.item(i);
Integer.parseInt(getChildNodeByName(resourcesUsed,"memory").getTextContent());
Integer.parseInt(getChildNodeByName(resourcesUsed,"vCores").getTextContent());
}
}
finally {
// Always shut down the RM even if an assertion above fails.
rm.stop();
}
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
// A delegation token that has been cancelled must no longer authenticate
// REST requests: submitting an app with it should yield 403 FORBIDDEN.
@Test public void testCancelledDelegationToken() throws Exception {
  String token=getDelegationToken("client");
  cancelDelegationToken(token);
  ApplicationSubmissionContextInfo app=new ApplicationSubmissionContextInfo();
  String appid="application_123_0";
  app.setApplicationId(appid);
  String requestBody=getMarshalledAppInfo(app);
  URL url=new URL("http://localhost:8088/ws/v1/cluster/apps");
  HttpURLConnection conn=(HttpURLConnection)url.openConnection();
  conn.setRequestProperty(DelegationTokenHeader,token);
  setupConn(conn,"POST",MediaType.APPLICATION_XML,requestBody);
  try {
    // getInputStream() throws IOException on a non-2xx HTTP status.
    conn.getInputStream();
    // Fix: the original message said "expired" tokens, but this test
    // cancels the token — the message now matches the scenario.
    fail("Authentication should fail with cancelled delegation tokens");
  }
  catch ( IOException e) {
    assertEquals(Status.FORBIDDEN.getStatusCode(),conn.getResponseCode());
  }
  // (redundant trailing "return;" removed)
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
// Token-authenticated clients must not be able to create, renew or cancel
// delegation tokens; each such request should come back 403 FORBIDDEN.
@Test public void testDelegationTokenOps() throws Exception {
  String token=getDelegationToken("client");
  String createRequest="{\"renewer\":\"test\"}";
  String renewRequest="{\"token\": \"" + token + "\"}";
  String[] requests={createRequest,renewRequest};
  // Creation and renewal both go through POST and must both be refused.
  for ( String requestBody : requests) {
    URL url=new URL("http://localhost:8088/ws/v1/cluster/delegation-token");
    HttpURLConnection conn=(HttpURLConnection)url.openConnection();
    conn.setRequestProperty(DelegationTokenHeader,token);
    setupConn(conn,"POST",MediaType.APPLICATION_JSON,requestBody);
    try {
      // getInputStream() throws IOException on a non-2xx HTTP status.
      conn.getInputStream();
      fail("Creation/Renewing delegation tokens should not be " + "allowed with token auth");
    }
    catch ( IOException e) {
      assertEquals(Status.FORBIDDEN.getStatusCode(),conn.getResponseCode());
    }
  }
  // Cancellation attempt via DELETE must also be refused.
  URL url=new URL("http://localhost:8088/ws/v1/cluster/delegation-token");
  HttpURLConnection conn=(HttpURLConnection)url.openConnection();
  conn.setRequestProperty(DelegationTokenHeader,token);
  // NOTE(review): both DelegationTokenHeader and
  // RMWebServices.DELEGATION_TOKEN_HEADER are set with the same value;
  // this looks redundant — confirm whether one can be dropped.
  conn.setRequestProperty(RMWebServices.DELEGATION_TOKEN_HEADER,token);
  setupConn(conn,"DELETE",null,null);
  try {
    conn.getInputStream();
    fail("Cancelling delegation tokens should not be allowed with token auth");
  }
  catch ( IOException e) {
    assertEquals(Status.FORBIDDEN.getStatusCode(),conn.getResponseCode());
  }
  // (redundant trailing "return;" removed)
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Requests without a delegation token must get 401 UNAUTHORIZED; with a
// valid token the app is submitted and owned by the token's owner
// ("client"), not the renewer the token was requested for.
@Test public void testDelegationTokenAuth() throws Exception {
  final String token=getDelegationToken("test");
  ApplicationSubmissionContextInfo app=new ApplicationSubmissionContextInfo();
  String appid="application_123_0";
  app.setApplicationId(appid);
  String requestBody=getMarshalledAppInfo(app);
  URL url=new URL("http://localhost:8088/ws/v1/cluster/apps");
  HttpURLConnection conn=(HttpURLConnection)url.openConnection();
  // Consistency fix: use the MediaType constant here as the second request
  // below does, instead of the raw "application/xml" literal (same value).
  setupConn(conn,"POST",MediaType.APPLICATION_XML,requestBody);
  try {
    // getInputStream() throws IOException on a non-2xx HTTP status.
    conn.getInputStream();
    fail("we should not be here");
  }
  catch ( IOException e) {
    assertEquals(Status.UNAUTHORIZED.getStatusCode(),conn.getResponseCode());
  }
  // Retry with the delegation token attached — submission should succeed.
  conn=(HttpURLConnection)url.openConnection();
  conn.setRequestProperty(DelegationTokenHeader,token);
  setupConn(conn,"POST",MediaType.APPLICATION_XML,requestBody);
  conn.getInputStream();
  boolean appExists=rm.getRMContext().getRMApps().containsKey(ConverterUtils.toApplicationId(appid));
  assertTrue(appExists);
  RMApp actualApp=rm.getRMContext().getRMApps().get(ConverterUtils.toApplicationId(appid));
  String owner=actualApp.getUser();
  assertEquals("client",owner);
  // (redundant trailing "return;" removed)
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Delegation-token renewal over the RM web services, for every JSON/XML
 * media-type combination:
 * the token owner ("client") may obtain but not renew a token; the
 * designated renewer ("client2") may renew it, and each renewal pushes the
 * expiration time strictly forward; any other principal ("client3") is
 * forbidden; a renew request without the token header is a bad request.
 * Falls back to verifySimpleAuthRenew when Kerberos auth is not enabled.
 */
@Test public void testRenewDelegationToken() throws Exception {
  rm.start();
  final String renewer = "client2";
  // Log HTTP traffic for debugging; register the filter only once
  // (the original registered it twice, duplicating every log line).
  this.client().addFilter(new LoggingFilter(System.out));
  final DelegationToken dummyToken = new DelegationToken();
  dummyToken.setRenewer(renewer);
  String[] mediaTypes = {MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML};
  for (final String mediaType : mediaTypes) {
    for (final String contentType : mediaTypes) {
      if (!isKerberosAuth) {
        verifySimpleAuthRenew(mediaType, contentType);
        continue;
      }
      // The owner obtains a token but must NOT be allowed to renew it.
      final DelegationToken responseToken = KerberosTestUtils.doAsClient(new Callable() {
        @Override public DelegationToken call() throws Exception {
          ClientResponse response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").accept(contentType)
              .entity(dummyToken, mediaType).post(ClientResponse.class);
          assertEquals(Status.OK, response.getClientResponseStatus());
          DelegationToken tok = getDelegationTokenFromResponse(response);
          assertFalse(tok.getToken().isEmpty());
          String body = generateRenewTokenBody(mediaType, tok.getToken());
          response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").path("expiration")
              .header(yarnTokenHeader, tok.getToken()).accept(contentType)
              .entity(body, mediaType).post(ClientResponse.class);
          assertEquals(Status.FORBIDDEN, response.getClientResponseStatus());
          return tok;
        }
      });
      // The designated renewer may renew; each renewal must extend expiry.
      KerberosTestUtils.doAs(renewer, new Callable() {
        @Override public DelegationToken call() throws Exception {
          long oldExpirationTime = Time.now();
          assertValidRMToken(responseToken.getToken());
          String body = generateRenewTokenBody(mediaType, responseToken.getToken());
          ClientResponse response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").path("expiration")
              .header(yarnTokenHeader, responseToken.getToken()).accept(contentType)
              .entity(body, mediaType).post(ClientResponse.class);
          assertEquals(Status.OK, response.getClientResponseStatus());
          DelegationToken tok = getDelegationTokenFromResponse(response);
          String message = "Expiration time not as expected: old = " + oldExpirationTime
              + "; new = " + tok.getNextExpirationTime();
          assertTrue(message, tok.getNextExpirationTime() > oldExpirationTime);
          oldExpirationTime = tok.getNextExpirationTime();
          // Wait so the second renewal lands on a strictly later timestamp.
          Thread.sleep(1000);
          response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").path("expiration")
              .header(yarnTokenHeader, responseToken.getToken()).accept(contentType)
              .entity(body, mediaType).post(ClientResponse.class);
          assertEquals(Status.OK, response.getClientResponseStatus());
          tok = getDelegationTokenFromResponse(response);
          message = "Expiration time not as expected: old = " + oldExpirationTime
              + "; new = " + tok.getNextExpirationTime();
          assertTrue(message, tok.getNextExpirationTime() > oldExpirationTime);
          return tok;
        }
      });
      // Any other user must be refused.
      KerberosTestUtils.doAs("client3", new Callable() {
        @Override public DelegationToken call() throws Exception {
          String body = generateRenewTokenBody(mediaType, responseToken.getToken());
          ClientResponse response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").path("expiration")
              .header(yarnTokenHeader, responseToken.getToken()).accept(contentType)
              .entity(body, mediaType).post(ClientResponse.class);
          assertEquals(Status.FORBIDDEN, response.getClientResponseStatus());
          return null;
        }
      });
      // A renew request without the token header is malformed.
      KerberosTestUtils.doAsClient(new Callable() {
        @Override public Void call() throws Exception {
          String token = "TEST_TOKEN_STRING";
          String body = "";
          if (mediaType.equals(MediaType.APPLICATION_JSON)) {
            body = "{\"token\": \"" + token + "\" }";
          }
          else {
            body = "" + token + " ";
          }
          ClientResponse response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").path("expiration").accept(contentType)
              .entity(body, mediaType).post(ClientResponse.class);
          assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
          return null;
        }
      });
    }
  }
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Token cancellation over the RM web services, for every JSON/XML
 * media-type combination: the owner and the designated renewer ("client2")
 * may cancel a token, while any other principal is forbidden and the token
 * remains valid. Bad-request cases are delegated to
 * testCancelTokenBadRequests. In simple-auth mode only
 * verifySimpleAuthCancel is exercised.
 */
@Test public void testCancelDelegationToken() throws Exception {
  rm.start();
  this.client().addFilter(new LoggingFilter(System.out));
  if (!isKerberosAuth) {
    verifySimpleAuthCancel();
    return;
  }
  final DelegationToken dtoken = new DelegationToken();
  String renewer = "client2";
  dtoken.setRenewer(renewer);
  String[] mediaTypes = {MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML};
  for (final String mediaType : mediaTypes) {
    for (final String contentType : mediaTypes) {
      // The owner may cancel its own token.
      KerberosTestUtils.doAsClient(new Callable() {
        @Override public Void call() throws Exception {
          ClientResponse response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").accept(contentType)
              .entity(dtoken, mediaType).post(ClientResponse.class);
          assertEquals(Status.OK, response.getClientResponseStatus());
          DelegationToken tok = getDelegationTokenFromResponse(response);
          response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").header(yarnTokenHeader, tok.getToken())
              .accept(contentType).delete(ClientResponse.class);
          assertEquals(Status.OK, response.getClientResponseStatus());
          assertTokenCancelled(tok.getToken());
          return null;
        }
      });
      // The designated renewer may cancel a token created by the owner.
      final DelegationToken tmpToken = KerberosTestUtils.doAsClient(new Callable() {
        @Override public DelegationToken call() throws Exception {
          ClientResponse response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").accept(contentType)
              .entity(dtoken, mediaType).post(ClientResponse.class);
          assertEquals(Status.OK, response.getClientResponseStatus());
          DelegationToken tok = getDelegationTokenFromResponse(response);
          return tok;
        }
      });
      KerberosTestUtils.doAs(renewer, new Callable() {
        @Override public Void call() throws Exception {
          ClientResponse response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").header(yarnTokenHeader, tmpToken.getToken())
              .accept(contentType).delete(ClientResponse.class);
          assertEquals(Status.OK, response.getClientResponseStatus());
          assertTokenCancelled(tmpToken.getToken());
          return null;
        }
      });
      // Any other user is refused, and the token must remain valid.
      final DelegationToken tmpToken2 = KerberosTestUtils.doAsClient(new Callable() {
        @Override public DelegationToken call() throws Exception {
          ClientResponse response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").accept(contentType)
              .entity(dtoken, mediaType).post(ClientResponse.class);
          assertEquals(Status.OK, response.getClientResponseStatus());
          DelegationToken tok = getDelegationTokenFromResponse(response);
          return tok;
        }
      });
      KerberosTestUtils.doAs("client3", new Callable() {
        @Override public Void call() throws Exception {
          ClientResponse response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").header(yarnTokenHeader, tmpToken2.getToken())
              .accept(contentType).delete(ClientResponse.class);
          assertEquals(Status.FORBIDDEN, response.getClientResponseStatus());
          assertValidRMToken(tmpToken2.getToken());
          return null;
        }
      });
      testCancelTokenBadRequests(mediaType, contentType);
    }
  }
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Fetches a single node ("h1:1234") as XML and verifies exactly one
 * {@code <node>} element comes back with the expected content.
 */
@Test public void testSingleNodesXML() throws JSONException, Exception {
  rm.start();
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  ClientResponse response = resource().path("ws").path("v1").path("cluster")
      .path("nodes").path("h1:1234")
      .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
  String body = response.getEntity(String.class);
  DocumentBuilder parser =
      DocumentBuilderFactory.newInstance().newDocumentBuilder();
  Document dom = parser.parse(new InputSource(new StringReader(body)));
  NodeList nodes = dom.getElementsByTagName("node");
  assertEquals("incorrect number of elements", 1, nodes.getLength());
  verifyNodesXML(nodes, nm1);
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * With two registered nodes, the XML nodes listing contains one
 * {@code <nodes>} wrapper holding two {@code <node>} entries.
 */
@Test public void testNodes2XML() throws JSONException, Exception {
  rm.start();
  rm.registerNode("h1:1234", 5120);
  rm.registerNode("h2:1235", 5121);
  ClientResponse response = resource().path("ws").path("v1").path("cluster")
      .path("nodes").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
  String body = response.getEntity(String.class);
  DocumentBuilder parser =
      DocumentBuilderFactory.newInstance().newDocumentBuilder();
  Document dom = parser.parse(new InputSource(new StringReader(body)));
  assertEquals("incorrect number of elements", 1,
      dom.getElementsByTagName("nodes").getLength());
  assertEquals("incorrect number of elements", 2,
      dom.getElementsByTagName("node").getLength());
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * With a single registered node, the XML nodes listing contains one
 * {@code <nodes>} wrapper with one {@code <node>} entry matching the NM.
 */
@Test public void testNodesXML() throws JSONException, Exception {
  rm.start();
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  ClientResponse response = resource().path("ws").path("v1").path("cluster")
      .path("nodes").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
  String body = response.getEntity(String.class);
  DocumentBuilder parser =
      DocumentBuilderFactory.newInstance().newDocumentBuilder();
  Document dom = parser.parse(new InputSource(new StringReader(body)));
  assertEquals("incorrect number of elements", 1,
      dom.getElementsByTagName("nodes").getLength());
  NodeList nodes = dom.getElementsByTagName("node");
  assertEquals("incorrect number of elements", 1, nodes.getLength());
  verifyNodesXML(nodes, nm1);
  rm.stop();
}
APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier
/**
 * The RM cluster page must be reachable without credentials; then
 * anonymous-user behavior is checked according to the security mode.
 */
@Test public void testSimpleAuth() throws Exception {
  rm.start();
  URL url = new URL("http://localhost:8088/cluster");
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  try {
    conn.getInputStream();
    assertEquals(Status.OK.getStatusCode(), conn.getResponseCode());
  }
  catch (Exception e) {
    // Include the cause so a failure is diagnosable from the message
    // (the original swallowed the exception entirely).
    fail("Fetching url failed: " + e);
  }
  if (UserGroupInformation.isSecurityEnabled()) {
    testAnonymousKerberosUser();
  }
  else {
    testAnonymousSimpleUser();
  }
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/** The leveldb store directory must exist with the expected umask-derived permission. */
@Test public void testRootDirPermission() throws IOException {
  FileSystem localFs = FileSystem.getLocal(new YarnConfiguration());
  Path dbPath = new Path(fsPath.getAbsolutePath(), LeveldbTimelineStore.FILENAME);
  FileStatus status = localFs.getFileStatus(dbPath);
  assertNotNull(status);
  assertEquals(LeveldbTimelineStore.LEVELDB_DIR_UMASK, status.getPermission());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies discardOldEntities against entities reachable through primary
// filters: a cutoff below all insert times removes nothing, a cutoff above
// them removes everything (including the primary-filter indexes).
@Test public void testDeleteEntitiesPrimaryFilters() throws IOException, InterruptedException {
// Store an extra entity (entityId1b) under primary filter user=otheruser.
Map primaryFilter=Collections.singletonMap("user",Collections.singleton((Object)"otheruser"));
TimelineEntities atsEntities=new TimelineEntities();
atsEntities.setEntities(Collections.singletonList(createEntity(entityId1b,entityType1,789l,Collections.singletonList(ev2),null,primaryFilter,null)));
TimelinePutResponse response=store.put(atsEntities);
assertEquals(0,response.getErrors().size());
// The new entity is visible under its own filter...
NameValuePair pfPair=new NameValuePair("user","otheruser");
List entities=getEntitiesWithPrimaryFilter("type_1",pfPair);
assertEquals(1,entities.size());
verifyEntityInfo(entityId1b,entityType1,Collections.singletonList(ev2),EMPTY_REL_ENTITIES,primaryFilter,EMPTY_MAP,entities.get(0));
// ...and both pre-existing entities remain visible under the user filter.
entities=getEntitiesWithPrimaryFilter("type_1",userFilter);
assertEquals(2,entities.size());
verifyEntityInfo(entityId1,entityType1,events1,EMPTY_REL_ENTITIES,primaryFilters,otherInfo,entities.get(0));
verifyEntityInfo(entityId1b,entityType1,events1,EMPTY_REL_ENTITIES,primaryFilters,otherInfo,entities.get(1));
// discardOldEntities(-123): counts are unchanged afterwards.
((LeveldbTimelineStore)store).discardOldEntities(-123l);
assertEquals(1,getEntitiesWithPrimaryFilter("type_1",pfPair).size());
assertEquals(2,getEntitiesWithPrimaryFilter("type_1",userFilter).size());
// discardOldEntities(123): all entities, types, and filter indexes are gone.
((LeveldbTimelineStore)store).discardOldEntities(123l);
assertEquals(0,getEntities("type_1").size());
assertEquals(0,getEntities("type_2").size());
assertEquals(0,((LeveldbTimelineStore)store).getEntityTypes().size());
assertEquals(0,getEntitiesWithPrimaryFilter("type_1",pfPair).size());
assertEquals(0,getEntitiesWithPrimaryFilter("type_1",userFilter).size());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Only configuration keys under the CrossOriginFilterInitializer prefix are
 * passed through as filter parameters; unrelated keys are dropped.
 */
@Test public void testGetFilterParameters(){
  Configuration conf = new Configuration();
  conf.set(CrossOriginFilterInitializer.PREFIX + "rootparam", "rootvalue");
  conf.set(CrossOriginFilterInitializer.PREFIX + "nested.param", "nestedvalue");
  conf.set("outofscopeparam", "outofscopevalue");
  Map params = CrossOriginFilterInitializer.getFilterParameters(conf);
  Assert.assertEquals("Could not find filter parameter", "rootvalue",
      params.get(CrossOriginFilterInitializer.PREFIX + "rootparam"));
  Assert.assertEquals("Could not find filter parameter", "nestedvalue",
      params.get(CrossOriginFilterInitializer.PREFIX + "nested.param"));
  Assert.assertNull("Found unexpected value in filter parameters",
      params.get("outofscopeparam"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** "fromTs" filters entities by insert time: a cutoff before the data was
 * stored matches nothing; a cutoff of "now" matches both stored entities. */
@Test public void testFromTs() throws Exception {
  WebResource timeline = resource().path("ws").path("v1").path("timeline");
  ClientResponse response = timeline.path("type_1")
      .queryParam("fromTs", Long.toString(beforeTime))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  assertEquals(0, response.getEntity(TimelineEntities.class).getEntities().size());
  response = timeline.path("type_1")
      .queryParam("fromTs", Long.toString(System.currentTimeMillis()))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  assertEquals(2, response.getEntity(TimelineEntities.class).getEntities().size());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// With the admin ACLs manager installed, the user who first posted an entity
// may keep posting to it, while a different user is rejected with
// ACCESS_DENIED.
@Test public void testPostEntitiesWithYarnACLsEnabled() throws Exception {
AdminACLsManager oldAdminACLsManager=timelineACLsManager.setAdminACLsManager(adminACLsManager);
try {
TimelineEntities entities=new TimelineEntities();
TimelineEntity entity=new TimelineEntity();
entity.setEntityId("test id 2");
entity.setEntityType("test type 2");
entity.setStartTime(System.currentTimeMillis());
entities.addEntity(entity);
WebResource r=resource();
// Owner "tester" posts successfully with no errors.
ClientResponse response=r.path("ws").path("v1").path("timeline").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).post(ClientResponse.class,entities);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
TimelinePutResponse putResponse=response.getEntity(TimelinePutResponse.class);
Assert.assertNotNull(putResponse);
Assert.assertEquals(0,putResponse.getErrors().size());
// A different user posting to the same entity gets an ACCESS_DENIED error.
response=r.path("ws").path("v1").path("timeline").queryParam("user.name","other").accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).post(ClientResponse.class,entities);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
putResponse=response.getEntity(TimelinePutResponse.class);
Assert.assertNotNull(putResponse);
Assert.assertEquals(1,putResponse.getErrors().size());
Assert.assertEquals(TimelinePutResponse.TimelinePutError.ACCESS_DENIED,putResponse.getErrors().get(0).getErrorCode());
}
finally {
// Restore the previous ACLs manager so other tests are unaffected.
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A primary filter that reuses the reserved ENTITY_OWNER system-filter name
 * must be rejected with a SYSTEM_FILTER_CONFLICT error.
 */
@Test public void testPostEntitiesWithPrimaryFilter() throws Exception {
  Map filters = new HashMap();
  filters.put(TimelineStore.SystemFilter.ENTITY_OWNER.toString(), new HashSet());
  TimelineEntity entity = new TimelineEntity();
  entity.setPrimaryFilters(filters);
  entity.setEntityId("test id 6");
  entity.setEntityType("test type 6");
  entity.setStartTime(System.currentTimeMillis());
  TimelineEntities entities = new TimelineEntities();
  entities.addEntity(entity);
  ClientResponse response = resource().path("ws").path("v1").path("timeline")
      .queryParam("user.name", "tester")
      .accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON)
      .post(ClientResponse.class, entities);
  TimelinePutResponse putResponse = response.getEntity(TimelinePutResponse.class);
  Assert.assertEquals(1, putResponse.getErrors().size());
  Assert.assertEquals(TimelinePutResponse.TimelinePutError.SYSTEM_FILTER_CONFLICT,
      putResponse.getErrors().get(0).getErrorCode());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// With ACLs enabled, the owner can read its entity (and the reserved
// ENTITY_OWNER primary filter is never exposed in any field view), while a
// different user gets NOT_FOUND.
@Test public void testGetEntityWithYarnACLsEnabled() throws Exception {
AdminACLsManager oldAdminACLsManager=timelineACLsManager.setAdminACLsManager(adminACLsManager);
try {
// Post an entity as "tester" so it is owned by that user.
TimelineEntities entities=new TimelineEntities();
TimelineEntity entity=new TimelineEntity();
entity.setEntityId("test id 3");
entity.setEntityType("test type 3");
entity.setStartTime(System.currentTimeMillis());
entities.addEntity(entity);
WebResource r=resource();
ClientResponse response=r.path("ws").path("v1").path("timeline").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).post(ClientResponse.class,entities);
// Owner GET: the ENTITY_OWNER system filter must not leak out.
response=r.path("ws").path("v1").path("timeline").path("test type 3").path("test id 3").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
entity=response.getEntity(TimelineEntity.class);
Assert.assertNull(entity.getPrimaryFilters().get(TimelineStore.SystemFilter.ENTITY_OWNER.toString()));
// Same check when explicitly requesting the relatedentities field...
response=r.path("ws").path("v1").path("timeline").path("test type 3").path("test id 3").queryParam("fields","relatedentities").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
entity=response.getEntity(TimelineEntity.class);
Assert.assertNull(entity.getPrimaryFilters().get(TimelineStore.SystemFilter.ENTITY_OWNER.toString()));
// ...and the primaryfilters field.
response=r.path("ws").path("v1").path("timeline").path("test type 3").path("test id 3").queryParam("fields","primaryfilters").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
entity=response.getEntity(TimelineEntity.class);
Assert.assertNull(entity.getPrimaryFilters().get(TimelineStore.SystemFilter.ENTITY_OWNER.toString()));
// A different user cannot see the entity at all.
response=r.path("ws").path("v1").path("timeline").path("test type 3").path("test id 3").queryParam("user.name","other").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
assertEquals(ClientResponse.Status.NOT_FOUND,response.getClientResponseStatus());
}
finally {
// Restore the previous ACLs manager so other tests are unaffected.
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Posting entities anonymously is forbidden; with a user.name it succeeds
 * without errors, and the stored entity is retrievable by type and id.
 */
@Test public void testPostEntities() throws Exception {
  TimelineEntity entity = new TimelineEntity();
  entity.setEntityId("test id 1");
  entity.setEntityType("test type 1");
  entity.setStartTime(System.currentTimeMillis());
  TimelineEntities entities = new TimelineEntities();
  entities.addEntity(entity);
  WebResource r = resource();
  // Anonymous POST: rejected.
  ClientResponse response = r.path("ws").path("v1").path("timeline")
      .accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON)
      .post(ClientResponse.class, entities);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  assertEquals(ClientResponse.Status.FORBIDDEN, response.getClientResponseStatus());
  // Authenticated POST: accepted with no errors.
  response = r.path("ws").path("v1").path("timeline")
      .queryParam("user.name", "tester")
      .accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON)
      .post(ClientResponse.class, entities);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  TimelinePutResponse putResponse = response.getEntity(TimelinePutResponse.class);
  Assert.assertNotNull(putResponse);
  Assert.assertEquals(0, putResponse.getErrors().size());
  // The stored entity can be fetched back by type and id.
  response = r.path("ws").path("v1").path("timeline")
      .path("test type 1").path("test id 1")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  entity = response.getEntity(TimelineEntity.class);
  Assert.assertNotNull(entity);
  Assert.assertEquals("test id 1", entity.getEntityId());
  Assert.assertEquals("test type 1", entity.getEntityType());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier PublicFieldVerifier HybridVerifier
/**
 * Every LeveldbIterator method must translate a RuntimeException thrown by
 * the underlying DBIterator into a DBException (without double-wrapping a
 * DBException), and close() must surface the failure as an IOException.
 */
@Test public void testExceptionHandling() throws Exception {
  // Dynamic-proxy DBIterator whose every method throws a RuntimeException.
  InvocationHandler rtExcHandler = new InvocationHandler() {
    @Override public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
      throw new RuntimeException("forced runtime error");
    }
  };
  DBIterator dbiter = (DBIterator) Proxy.newProxyInstance(
      DBIterator.class.getClassLoader(), new Class[]{DBIterator.class}, rtExcHandler);
  LeveldbIterator iter = new LeveldbIterator(dbiter);
  for (CallInfo ci : RTEXC_METHODS) {
    Method method = iter.getClass().getMethod(ci.methodName, ci.argTypes);
    assertNotNull("unable to locate method " + ci.methodName, method);
    try {
      method.invoke(iter, ci.args);
      fail("operation should have thrown");
    }
    catch (InvocationTargetException ite) {
      Throwable exc = ite.getTargetException();
      assertTrue("Method " + ci.methodName + " threw non-DBException: " + exc,
          exc instanceof DBException);
      assertFalse("Method " + ci.methodName + " double-wrapped DBException",
          exc.getCause() instanceof DBException);
    }
  }
  // close() declares IOException, so the failure must arrive as one.
  try {
    iter.close();
    fail("operation should have thrown");  // fixed typo: was "shoul"
  }
  catch (IOException e) {
    // expected
  }
}
APIUtilityVerifier EqualityVerifier
/**
 * When no original tracking URI exists, the proxy URI is simply the proxy
 * base plus /proxy/&lt;appId&gt;/.
 */
@Test public void testGetProxyUriNull() throws Exception {
  URI originalUri = null;
  URI proxyBase = new URI("http://proxy.net:8080/");
  ApplicationId appId = BuilderUtils.newApplicationId(6384623l, 5);
  URI actual = ProxyUriUtils.getProxyUri(originalUri, proxyBase, appId);
  assertEquals(new URI("http://proxy.net:8080/proxy/application_6384623_0005/"), actual);
}
APIUtilityVerifier NullVerifier
/**
 * When the first tracking-URI plugin yields null, the lookup falls through
 * to the next plugin and returns its non-null URI.
 */
@Test public void testGetProxyUriFromPluginsReturnsValidUriWhenAble() throws URISyntaxException {
  ApplicationId appId = BuilderUtils.newApplicationId(6384623l, 5);
  List plugins = Lists.newArrayListWithExpectedSize(2);
  // First plugin knows nothing about the app.
  plugins.add(new TrackingUriPlugin() {
    public URI getTrackingUri(ApplicationId id) throws URISyntaxException {
      return null;
    }
  });
  // Second plugin supplies a URI.
  plugins.add(new TrackingUriPlugin() {
    public URI getTrackingUri(ApplicationId id) throws URISyntaxException {
      return new URI("http://history.server.net/");
    }
  });
  URI uri = ProxyUriUtils.getUriFromTrackingPlugins(appId, plugins);
  assertNotNull(uri);
}
APIUtilityVerifier EqualityVerifier
/**
 * The original URI's path and query are appended after the
 * /proxy/&lt;appId&gt;/ prefix on the proxy base.
 */
@Test public void testGetProxyUri() throws Exception {
  URI trackingUri = new URI("http://host.com/static/foo?bar=bar");
  URI proxyBase = new URI("http://proxy.net:8080/");
  ApplicationId appId = BuilderUtils.newApplicationId(6384623l, 5);
  URI actual = ProxyUriUtils.getProxyUri(trackingUri, proxyBase, appId);
  assertEquals(
      new URI("http://proxy.net:8080/proxy/application_6384623_0005/static/foo?bar=bar"),
      actual);
}
APIUtilityVerifier EqualityVerifier
/** With no proxy address configured, the bind address uses the default proxy port. */
@Test public void testBindAddress(){
  InetSocketAddress addr = WebAppProxyServer.getBindAddress(new YarnConfiguration());
  Assert.assertEquals("Web Proxy default bind address port is incorrect",
      YarnConfiguration.DEFAULT_PROXY_PORT, addr.getPort());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
// End-to-end test of the web-app proxy servlet against a stub
// AppReportFetcherForTest; the public "answer" field selects the fetcher's
// canned behavior for each request.
@Test(timeout=5000) public void testWebAppProxyServlet() throws Exception {
Configuration configuration=new Configuration();
configuration.set(YarnConfiguration.PROXY_ADDRESS,"localhost:9090");
// Keep the embedded HTTP server's thread pool small for the test.
configuration.setInt("hadoop.http.max.threads",5);
WebAppProxyServerForTest proxy=new WebAppProxyServerForTest();
proxy.init(configuration);
proxy.start();
int proxyPort=proxy.proxy.proxyServer.getConnectorAddress(0).getPort();
AppReportFetcherForTest appReportFetcher=proxy.proxy.appReportFetcher;
try {
// An unparsable application id yields an internal server error.
URL wrongUrl=new URL("http://localhost:" + proxyPort + "/proxy/app");
HttpURLConnection proxyConn=(HttpURLConnection)wrongUrl.openConnection();
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR,proxyConn.getResponseCode());
// Default fetcher behavior: proxying succeeds and the "checked" cookie
// is echoed back in the response.
URL url=new URL("http://localhost:" + proxyPort + "/proxy/application_00_0");
proxyConn=(HttpURLConnection)url.openConnection();
proxyConn.setRequestProperty("Cookie","checked_application_0_0000=true");
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_OK,proxyConn.getResponseCode());
assertTrue(isResponseCookiePresent(proxyConn,"checked_application_0_0000","true"));
// answer=1: request maps to NOT_FOUND and the cookie is not returned.
appReportFetcher.answer=1;
proxyConn=(HttpURLConnection)url.openConnection();
proxyConn.setRequestProperty("Cookie","checked_application_0_0000=true");
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_NOT_FOUND,proxyConn.getResponseCode());
assertFalse(isResponseCookiePresent(proxyConn,"checked_application_0_0000","true"));
// answer=4: also NOT_FOUND without the cookie.
appReportFetcher.answer=4;
proxyConn=(HttpURLConnection)url.openConnection();
proxyConn.setRequestProperty("Cookie","checked_application_0_0000=true");
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_NOT_FOUND,proxyConn.getResponseCode());
assertFalse(isResponseCookiePresent(proxyConn,"checked_application_0_0000","true"));
// answer=2 (no "checked" cookie sent): the interstitial warning page is
// served instead of the app master's page.
appReportFetcher.answer=2;
proxyConn=(HttpURLConnection)url.openConnection();
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_OK,proxyConn.getResponseCode());
String s=readInputStream(proxyConn.getInputStream());
assertTrue(s.contains("to continue to an Application Master web interface owned by"));
assertTrue(s.contains("WARNING: The following page may not be safe!"));
// answer=3: plain OK response.
appReportFetcher.answer=3;
proxyConn=(HttpURLConnection)url.openConnection();
proxyConn.setRequestProperty("Cookie","checked_application_0_0000=true");
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_OK,proxyConn.getResponseCode());
}
finally {
// Always shut the proxy down, even on assertion failure.
proxy.close();
}
}
APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test main method of WebAppProxyServer: start the server via
 * {@code startServer}, poll until it accepts connections, and expect an
 * internal server error for an unparsable proxy path.
 */
@Test(timeout=5000) public void testWebAppProxyServerMainMethod() throws Exception {
WebAppProxyServer mainServer=null;
Configuration conf=new YarnConfiguration();
conf.set(YarnConfiguration.PROXY_ADDRESS,"localhost:9099");
try {
mainServer=WebAppProxyServer.startServer(conf);
// Poll up to 20 times, 100 ms apart, until the server answers.
int counter=20;
URL wrongUrl=new URL("http://localhost:9099/proxy/app");
HttpURLConnection proxyConn=null;
while (counter > 0) {
counter--;
try {
proxyConn=(HttpURLConnection)wrongUrl.openConnection();
proxyConn.connect();
proxyConn.getResponseCode();
// Connected successfully: stop polling.
counter=0;
}
catch ( Exception e) {
// Server not up yet; back off briefly and retry.
Thread.sleep(100);
}
}
assertNotNull(proxyConn);
// An unparsable application id in the proxy path maps to HTTP 500.
assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR,proxyConn.getResponseCode());
}
finally {
if (mainServer != null) {
mainServer.stop();
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Exercises getProxyHostsAndPortsForAmFilter across configuration scenarios:
// default, explicit proxy address, non-HA webapp address, HA with multiple
// RM ids, and HTTPS-only policy.
@Test public void testGetProxyHostsAndPortsForAmFilter(){
// Scenario 1: no configuration — falls back to the resolved RM webapp URL.
Configuration conf=new Configuration(false);
List proxyHosts=WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
assertEquals(1,proxyHosts.size());
assertEquals(WebAppUtils.getResolvedRMWebAppURLWithoutScheme(conf),proxyHosts.get(0));
// Scenario 2: an explicit PROXY_ADDRESS wins even when HA is configured.
conf=new Configuration(false);
conf.set(YarnConfiguration.PROXY_ADDRESS,"host1:1000");
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED,true);
conf.set(YarnConfiguration.RM_HA_IDS,"rm1,rm2,rm3");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm1","host2:2000");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm2","host3:3000");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm3","host4:4000");
proxyHosts=WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
assertEquals(1,proxyHosts.size());
assertEquals("host1:1000",proxyHosts.get(0));
// Scenario 3: non-HA — the single RM webapp address is used.
conf=new Configuration(false);
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS,"host2:2000");
proxyHosts=WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
assertEquals(1,proxyHosts.size());
Collections.sort(proxyHosts);
assertEquals("host2:2000",proxyHosts.get(0));
// Scenario 4: HA with ids rm1..rm3 — one host per listed id; the address
// configured for "rm4" (not in RM_HA_IDS) is ignored.
conf=new Configuration(false);
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED,true);
conf.set(YarnConfiguration.RM_HA_IDS,"rm1,rm2,rm3");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm1","host2:2000");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm2","host3:3000");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm3","host4:4000");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm4","dummy");
conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm1","host5:5000");
conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm2","host6:6000");
proxyHosts=WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
assertEquals(3,proxyHosts.size());
Collections.sort(proxyHosts);
assertEquals("host2:2000",proxyHosts.get(0));
assertEquals("host3:3000",proxyHosts.get(1));
assertEquals("host4:4000",proxyHosts.get(2));
// Scenario 5: HTTPS_ONLY policy — only the ids with an HTTPS address
// configured (rm1, rm2) contribute hosts.
conf=new Configuration(false);
conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,HttpConfig.Policy.HTTPS_ONLY.toString());
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED,true);
conf.set(YarnConfiguration.RM_HA_IDS,"rm1,rm2,rm3,dummy");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm1","host2:2000");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm2","host3:3000");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm3","host4:4000");
conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm1","host5:5000");
conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm2","host6:6000");
proxyHosts=WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
assertEquals(2,proxyHosts.size());
Collections.sort(proxyHosts);
assertEquals("host5:5000",proxyHosts.get(0));
assertEquals("host6:6000",proxyHosts.get(1));
}
APIUtilityVerifier EqualityVerifier
/** "/rack1/node1" splits into rack "rack1" and host "node1". */
@Test public void testGetRackHostname(){
  String str = "/rack1/node1";
  String[] rackHostname = SLSUtils.getRackHostName(str);
  // assertEquals takes (expected, actual); the original had them swapped,
  // which produces misleading failure messages.
  Assert.assertEquals("rack1", rackHostname[0]);
  Assert.assertEquals("node1", rackHostname[1]);
}
APIUtilityVerifier BooleanVerifier
// The track page template must format to a non-empty page when given
// queue/app placeholder fragments.
@Test public void testTrackPageHtmlTemplate() throws Exception {
String trackTemplate=FileUtils.readFileToString(new File("src/main/html/track.html.template"));
String trackedQueueInfo="";
Set trackedQueues=new HashSet();
trackedQueues.add("sls_queue_1");
trackedQueues.add("sls_queue_2");
trackedQueues.add("sls_queue_3");
// NOTE(review): the appended fragment is an empty string, making this loop
// a no-op as written — the original HTML option markup appears to have been
// stripped; confirm against the upstream source.
for ( String queue : trackedQueues) {
trackedQueueInfo+="";
}
String trackedAppInfo="";
Set trackedApps=new HashSet();
trackedApps.add("app_1");
trackedApps.add("app_2");
// NOTE(review): same stripped-markup concern as above.
for ( String job : trackedApps) {
trackedAppInfo+="";
}
// Substitute the fragments into the template; a successful format yields
// non-empty output.
String trackInfo=MessageFormat.format(trackTemplate,trackedQueueInfo,trackedAppInfo,"s",1000,1000);
Assert.assertTrue("The queue/app tracking html page should not be empty",trackInfo.length() > 0);
}
APIUtilityVerifier BooleanVerifier
/**
 * The simulate page template must format to a non-empty page when given the
 * per-queue legend fragments.
 */
@Test public void testSimulatePageHtmlTemplate() throws Exception {
  String simulateTemplate =
      FileUtils.readFileToString(new File("src/main/html/simulate.html.template"));
  Set queues = new HashSet();
  queues.add("sls_queue_1");
  queues.add("sls_queue_2");
  queues.add("sls_queue_3");
  // Build the legend fragments with a StringBuilder instead of repeated
  // String concatenation in the loop (same output, no quadratic copying).
  StringBuilder queueInfo = new StringBuilder();
  int i = 0;
  for (String queue : queues) {
    queueInfo.append("legends[4][" + i + "] = 'queue" + queue + ".allocated.memory'");
    queueInfo.append("legends[5][" + i + "] = 'queue" + queue + ".allocated.vcores'");
    i++;
  }
  String simulateInfo =
      MessageFormat.format(simulateTemplate, queueInfo.toString(), "s", 1000, 1000);
  Assert.assertTrue("The simulate page html page should not be empty",
      simulateInfo.length() > 0);
}
APIUtilityVerifier BooleanVerifier
/**
 * Renders src/main/html/simulate.info.html.template from the shared
 * SLSRunner.simulateInfoMap and checks that the page is non-empty and
 * contains every key/value pair that was put into the map.
 */
@Test public void testSimulateInfoPageHtmlTemplate() throws Exception {
String simulateInfoTemplate=FileUtils.readFileToString(new File("src/main/html/simulate.info.html.template"));
// Populate the shared info map with representative simulation stats.
// ("applicaion" typo below is a runtime key string; the assertion at the
// bottom matches the same literal, so it must stay as-is.)
SLSRunner.simulateInfoMap.put("Number of racks",10);
SLSRunner.simulateInfoMap.put("Number of nodes",100);
SLSRunner.simulateInfoMap.put("Node memory (MB)",1024);
SLSRunner.simulateInfoMap.put("Node VCores",1);
SLSRunner.simulateInfoMap.put("Number of applications",100);
SLSRunner.simulateInfoMap.put("Number of tasks",1000);
SLSRunner.simulateInfoMap.put("Average tasks per applicaion",10);
SLSRunner.simulateInfoMap.put("Number of queues",4);
SLSRunner.simulateInfoMap.put("Average applications per queue",25);
SLSRunner.simulateInfoMap.put("Estimated simulate time (s)",10000);
// Build the info fragment injected into the template's {0} placeholder.
// NOTE(review): the appended markup looks stripped in this copy of the
// test (empty strings around key/value); confirm against the original.
StringBuilder info=new StringBuilder();
for ( Map.Entry entry : SLSRunner.simulateInfoMap.entrySet()) {
info.append("");
info.append("" + entry.getKey() + " ");
info.append("" + entry.getValue() + " ");
info.append(" ");
}
String simulateInfo=MessageFormat.format(simulateInfoTemplate,info.toString());
Assert.assertTrue("The simulate info html page should not be empty",simulateInfo.length() > 0);
// Every map entry must appear, rendered as "<key> <value> ".
for ( Map.Entry entry : SLSRunner.simulateInfoMap.entrySet()) {
Assert.assertTrue("The simulate info html page should have information " + "of " + entry.getKey(),simulateInfo.contains("" + entry.getKey() + " "+ entry.getValue()+ " "));
}
}
APIUtilityVerifier EqualityVerifier
/**
 * Round-trips a port-less hdfs:// Path through the YARN URL record and
 * back, expecting the original Path to be reproduced exactly.
 */
@Test public void testConvertUrlWithNoPort() throws URISyntaxException {
  Path original=new Path("hdfs://foo.com");
  URL yarnUrl=ConverterUtils.getYarnUrlFromPath(original);
  Path roundTripped=ConverterUtils.getPathFromYarnURL(yarnUrl);
  assertEquals(original,roundTripped);
}
APIUtilityVerifier EqualityVerifier
/**
 * Round-trips a Path whose URI carries userinfo (username:password) and a
 * port through the YARN URL record, expecting the Path to survive intact.
 */
@Test public void testConvertUrlWithUserinfo() throws URISyntaxException {
  Path original=new Path("foo://username:password@example.com:8042");
  URL yarnUrl=ConverterUtils.getYarnUrlFromPath(original);
  Path roundTripped=ConverterUtils.getPathFromYarnURL(yarnUrl);
  assertEquals(original,roundTripped);
}
APIUtilityVerifier EqualityVerifier
/**
 * Round-trips a ContainerId through its string form: the rendered id must
 * match the canonical format, and parsing it back must yield an equal id.
 */
@Test public void testContainerId() throws URISyntaxException {
  ContainerId id=TestContainerId.newContainerId(0,0,0,0);
  String cid=ConverterUtils.toString(id);
  assertEquals("container_0_0000_00_000000",cid);
  ContainerId gen=ConverterUtils.toContainerId(cid);
  // JUnit convention: expected value (the original id) first.
  assertEquals(id,gen);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test(timeout=10000) public void testDownloadBadPublic() throws IOException, URISyntaxException, InterruptedException {
Configuration conf=new Configuration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,"077");
FileContext files=FileContext.getLocalFSFileContext(conf);
final Path basedir=files.makeQualified(new Path("target",TestFSDownload.class.getSimpleName()));
files.mkdir(basedir,null,true);
conf.setStrings(TestFSDownload.class.getName(),basedir.toString());
Map rsrcVis=new HashMap();
Random rand=new Random();
long sharedSeed=rand.nextLong();
rand.setSeed(sharedSeed);
System.out.println("SEED: " + sharedSeed);
Map> pending=new HashMap>();
ExecutorService exec=Executors.newSingleThreadExecutor();
LocalDirAllocator dirs=new LocalDirAllocator(TestFSDownload.class.getName());
int size=512;
LocalResourceVisibility vis=LocalResourceVisibility.PUBLIC;
Path path=new Path(basedir,"test-file");
LocalResource rsrc=createFile(files,path,size,rand,vis);
rsrcVis.put(rsrc,vis);
Path destPath=dirs.getLocalPathForWrite(basedir.toString(),size,conf);
destPath=new Path(destPath,Long.toString(uniqueNumberGenerator.incrementAndGet()));
FSDownload fsd=new FSDownload(files,UserGroupInformation.getCurrentUser(),conf,destPath,rsrc);
pending.put(rsrc,exec.submit(fsd));
exec.shutdown();
while (!exec.awaitTermination(1000,TimeUnit.MILLISECONDS)) ;
Assert.assertTrue(pending.get(rsrc).isDone());
try {
for ( Map.Entry> p : pending.entrySet()) {
p.getValue().get();
Assert.fail("We localized a file that is not public.");
}
}
catch ( ExecutionException e) {
Assert.assertTrue(e.getCause() instanceof IOException);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Ensures FSDownload localizes into the unique per-resource destination
 * directory: the localized path's parent must equal the destPath that was
 * handed to the downloader.
 */
@Test(timeout=10000) public void testUniqueDestinationPath() throws Exception {
  Configuration conf=new Configuration();
  FileContext files=FileContext.getLocalFSFileContext(conf);
  final Path basedir=files.makeQualified(new Path("target",TestFSDownload.class.getSimpleName()));
  files.mkdir(basedir,null,true);
  conf.setStrings(TestFSDownload.class.getName(),basedir.toString());
  ExecutorService singleThreadedExec=Executors.newSingleThreadExecutor();
  LocalDirAllocator dirs=new LocalDirAllocator(TestFSDownload.class.getName());
  Path destPath=dirs.getLocalPathForWrite(basedir.toString(),conf);
  // Unique numeric suffix so concurrent runs never collide on destPath.
  destPath=new Path(destPath,Long.toString(uniqueNumberGenerator.incrementAndGet()));
  Path p=new Path(basedir,"dir" + 0 + ".jar");
  LocalResourceVisibility vis=LocalResourceVisibility.PRIVATE;
  LocalResource rsrc=createJar(files,p,vis);
  FSDownload fsd=new FSDownload(files,UserGroupInformation.getCurrentUser(),conf,destPath,rsrc);
  // Restored type argument: raw Future.get() returns Object, which has no
  // getParent() and therefore did not compile.
  Future<Path> rPath=singleThreadedExec.submit(fsd);
  singleThreadedExec.shutdown();
  // Block until the download task has finished.
  while (!singleThreadedExec.awaitTermination(1000,TimeUnit.MILLISECONDS)) ;
  Assert.assertTrue(rPath.isDone());
  Assert.assertEquals(destPath,rPath.get().getParent());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=10000) public void testDownload() throws IOException, URISyntaxException, InterruptedException {
Configuration conf=new Configuration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,"077");
FileContext files=FileContext.getLocalFSFileContext(conf);
final Path basedir=files.makeQualified(new Path("target",TestFSDownload.class.getSimpleName()));
files.mkdir(basedir,null,true);
conf.setStrings(TestFSDownload.class.getName(),basedir.toString());
Map rsrcVis=new HashMap();
Random rand=new Random();
long sharedSeed=rand.nextLong();
rand.setSeed(sharedSeed);
System.out.println("SEED: " + sharedSeed);
Map> pending=new HashMap>();
ExecutorService exec=Executors.newSingleThreadExecutor();
LocalDirAllocator dirs=new LocalDirAllocator(TestFSDownload.class.getName());
int[] sizes=new int[10];
for (int i=0; i < 10; ++i) {
sizes[i]=rand.nextInt(512) + 512;
LocalResourceVisibility vis=LocalResourceVisibility.PRIVATE;
if (i % 2 == 1) {
vis=LocalResourceVisibility.APPLICATION;
}
Path p=new Path(basedir,"" + i);
LocalResource rsrc=createFile(files,p,sizes[i],rand,vis);
rsrcVis.put(rsrc,vis);
Path destPath=dirs.getLocalPathForWrite(basedir.toString(),sizes[i],conf);
destPath=new Path(destPath,Long.toString(uniqueNumberGenerator.incrementAndGet()));
FSDownload fsd=new FSDownload(files,UserGroupInformation.getCurrentUser(),conf,destPath,rsrc);
pending.put(rsrc,exec.submit(fsd));
}
exec.shutdown();
while (!exec.awaitTermination(1000,TimeUnit.MILLISECONDS)) ;
for ( Future path : pending.values()) {
Assert.assertTrue(path.isDone());
}
try {
for ( Map.Entry> p : pending.entrySet()) {
Path localized=p.getValue().get();
assertEquals(sizes[Integer.valueOf(localized.getName())],p.getKey().getSize());
FileStatus status=files.getFileStatus(localized.getParent());
FsPermission perm=status.getPermission();
assertEquals("Cache directory permissions are incorrect",new FsPermission((short)0755),perm);
status=files.getFileStatus(localized);
perm=status.getPermission();
System.out.println("File permission " + perm + " for rsrc vis "+ p.getKey().getVisibility().name());
assert (rsrcVis.containsKey(p.getKey()));
Assert.assertTrue("Private file should be 500",perm.toShort() == FSDownload.PRIVATE_FILE_PERMS.toShort());
}
}
catch ( ExecutionException e) {
throw new IOException("Failed exec",e);
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * End-to-end check of ProcfsBasedProcessTree against a live process tree:
 * spawns a chain of shell processes via a recursive script, verifies the
 * discovered tree, destroys it, and validates the dump format and the
 * post-destruction (empty) tree state. Requires Linux procfs; bails out
 * early when it is not available.
 */
@Test(timeout=30000) public void testProcessTree() throws Exception {
// Guard: skip the whole test where procfs-based trees are unsupported.
try {
Assert.assertTrue(ProcfsBasedProcessTree.isAvailable());
}
catch ( Exception e) {
LOG.info(StringUtils.stringifyException(e));
Assert.assertTrue("ProcfsBaseProcessTree should be available on Linux",false);
return;
}
// Create uniquely named script/pid files that clean themselves up on exit.
Random rm=new Random();
File tempFile=new File(TEST_ROOT_DIR,getClass().getName() + "_shellScript_" + rm.nextInt()+ ".sh");
tempFile.deleteOnExit();
shellScript=TEST_ROOT_DIR + File.separator + tempFile.getName();
tempFile=new File(TEST_ROOT_DIR,getClass().getName() + "_pidFile_" + rm.nextInt()+ ".pid");
tempFile.deleteOnExit();
pidFile=TEST_ROOT_DIR + File.separator + tempFile.getName();
lowestDescendant=TEST_ROOT_DIR + File.separator + "lowestDescendantPidFile";
// Write a recursive script: each level re-invokes itself with $1-1; the
// deepest level records its pid in lowestDescendant and loops forever.
try {
FileWriter fWriter=new FileWriter(shellScript);
fWriter.write("# rogue task\n" + "sleep 1\n" + "echo hello\n"+ "if [ $1 -ne 0 ]\n"+ "then\n"+ " sh " + shellScript + " $(($1-1))\n"+ "else\n"+ " echo $$ > "+ lowestDescendant+ "\n"+ " while true\n do\n"+ " sleep 5\n"+ " done\n"+ "fi");
fWriter.close();
}
catch ( IOException ioe) {
LOG.info("Error: " + ioe);
return;
}
// Launch the rogue task asynchronously and build a tree rooted at its pid.
Thread t=new RogueTaskThread();
t.start();
String pid=getRogueTaskPID();
LOG.info("Root process pid: " + pid);
ProcfsBasedProcessTree p=createProcessTree(pid);
p.updateProcessTree();
LOG.info("ProcessTree: " + p.toString());
// Wait until the deepest child has written its pid file, so the whole
// chain is known to exist before the tree is re-scanned.
File leaf=new File(lowestDescendant);
while (!leaf.exists()) {
try {
Thread.sleep(500);
}
catch ( InterruptedException ie) {
break;
}
}
p.updateProcessTree();
LOG.info("ProcessTree: " + p.toString());
// Capture the dump before destroying, then poll (up to ~10s) for death.
String processTreeDump=p.getProcessTreeDump();
destroyProcessTree(pid);
boolean isAlive=true;
for (int tries=100; tries > 0; tries--) {
if (isSetsidAvailable()) {
isAlive=isAnyProcessInTreeAlive(p);
}
else {
isAlive=isAlive(pid);
}
if (!isAlive) {
break;
}
Thread.sleep(100);
}
if (isAlive) {
fail("ProcessTree shouldn't be alive");
}
LOG.info("Process-tree dump follows: \n" + processTreeDump);
// The dump must begin with the canonical header line.
Assert.assertTrue("Process-tree dump doesn't start with a proper header",processTreeDump.startsWith("\t|- PID PPID PGRPID SESSID CMD_NAME " + "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) " + "RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n"));
// Every level of the recursive script (args N..0) must appear in the dump.
for (int i=N; i >= 0; i--) {
String cmdLineDump="\\|- [0-9]+ [0-9]+ [0-9]+ [0-9]+ \\(sh\\)" + " [0-9]+ [0-9]+ [0-9]+ [0-9]+ sh " + shellScript + " "+ i;
Pattern pat=Pattern.compile(cmdLineDump);
Matcher mat=pat.matcher(processTreeDump);
Assert.assertTrue("Process-tree dump doesn't contain the cmdLineDump of " + i + "th process!",mat.find());
}
try {
t.join(2000);
LOG.info("RogueTaskThread successfully joined.");
}
catch ( InterruptedException ie) {
LOG.info("Interrupted while joining RogueTaskThread.");
}
// After destruction a re-scan must yield an empty tree with zero vmem.
p.updateProcessTree();
Assert.assertFalse("ProcessTree must have been gone",isAlive(pid));
Assert.assertTrue("Cumulative vmem for the gone-process is " + p.getCumulativeVmem() + " . It should be zero.",p.getCumulativeVmem() == 0);
Assert.assertTrue(p.toString().equals("[ ]"));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Test the correctness of the process-tree dump: builds a fake procfs with
 * six processes where pids 100..500 form one tree rooted at 100 and pid
 * 600 is unrelated, then checks the dump lists exactly the in-tree
 * processes.
 * @throws IOException if the fake procfs cannot be set up
 */
@Test(timeout=30000) public void testProcessTreeDump() throws IOException {
String[] pids={"100","200","300","400","500","600"};
File procfsRootDir=new File(TEST_ROOT_DIR,"proc");
try {
setupProcfsRootDir(procfsRootDir);
setupPidDirs(procfsRootDir,pids);
int numProcesses=pids.length;
// Stat fields per ProcessStatInfo — presumably pid, name, ppid, pgrp,
// session, vmem, rss, utime, stime (confirm against its constructor).
// 600's parent appears to be 1, leaving it outside the tree at 100.
ProcessStatInfo[] procInfos=new ProcessStatInfo[numProcesses];
procInfos[0]=new ProcessStatInfo(new String[]{"100","proc1","1","100","100","100000","100","1000","200"});
procInfos[1]=new ProcessStatInfo(new String[]{"200","proc2","100","100","100","200000","200","2000","400"});
procInfos[2]=new ProcessStatInfo(new String[]{"300","proc3","200","100","100","300000","300","3000","600"});
procInfos[3]=new ProcessStatInfo(new String[]{"400","proc4","200","100","100","400000","400","4000","800"});
procInfos[4]=new ProcessStatInfo(new String[]{"500","proc5","400","100","100","400000","400","4000","800"});
procInfos[5]=new ProcessStatInfo(new String[]{"600","proc6","1","1","1","400000","400","4000","800"});
ProcessTreeSmapMemInfo[] memInfos=new ProcessTreeSmapMemInfo[6];
memInfos[0]=new ProcessTreeSmapMemInfo("100");
memInfos[1]=new ProcessTreeSmapMemInfo("200");
memInfos[2]=new ProcessTreeSmapMemInfo("300");
memInfos[3]=new ProcessTreeSmapMemInfo("400");
memInfos[4]=new ProcessTreeSmapMemInfo("500");
memInfos[5]=new ProcessTreeSmapMemInfo("600");
// Command lines written to each fake /proc/<pid>/cmdline.
String[] cmdLines=new String[numProcesses];
cmdLines[0]="proc1 arg1 arg2";
cmdLines[1]="proc2 arg3 arg4";
cmdLines[2]="proc3 arg5 arg6";
cmdLines[3]="proc4 arg7 arg8";
cmdLines[4]="proc5 arg9 arg10";
cmdLines[5]="proc6 arg11 arg12";
createMemoryMappingInfo(memInfos);
writeStatFiles(procfsRootDir,pids,procInfos,memInfos);
writeCmdLineFiles(procfsRootDir,pids,cmdLines);
ProcfsBasedProcessTree processTree=createProcessTree("100",procfsRootDir.getAbsolutePath());
processTree.updateProcessTree();
String processTreeDump=processTree.getProcessTreeDump();
LOG.info("Process-tree dump follows: \n" + processTreeDump);
// The dump must begin with the canonical header line.
Assert.assertTrue("Process-tree dump doesn't start with a proper header",processTreeDump.startsWith("\t|- PID PPID PGRPID SESSID CMD_NAME " + "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) " + "RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n"));
// Only the first five processes belong to the tree rooted at 100.
for (int i=0; i < 5; i++) {
ProcessStatInfo p=procInfos[i];
Assert.assertTrue("Process-tree dump doesn't contain the cmdLineDump of process " + p.pid,processTreeDump.contains("\t|- " + p.pid + " "+ p.ppid+ " "+ p.pgrpId+ " "+ p.session+ " ("+ p.name+ ") "+ p.utime+ " "+ p.stime+ " "+ p.vmem+ " "+ p.rssmemPage+ " "+ cmdLines[i]));
}
// The unrelated process (pid 600) must not show up in the dump.
ProcessStatInfo p=procInfos[5];
Assert.assertFalse("Process-tree dump shouldn't contain the cmdLineDump of process " + p.pid,processTreeDump.contains("\t|- " + p.pid + " "+ p.ppid+ " "+ p.pgrpId+ " "+ p.session+ " ("+ p.name+ ") "+ p.utime+ " "+ p.stime+ " "+ p.vmem+ " "+ cmdLines[5]));
}
finally {
// Always remove the fake procfs tree.
FileUtil.fullyDelete(procfsRootDir);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A basic test that creates a few process directories and writes stat files.
 * Verifies that the cpu time and memory is correctly computed.
 * @throws IOException if there was a problem setting up the fake procfs
 * directories or files.
 */
@Test(timeout=30000) public void testCpuAndMemoryForProcessTree() throws IOException {
String[] pids={"100","200","300","400"};
File procfsRootDir=new File(TEST_ROOT_DIR,"proc");
try {
setupProcfsRootDir(procfsRootDir);
setupPidDirs(procfsRootDir,pids);
// Stat fields per ProcessStatInfo — presumably pid, name, ppid, pgrp,
// session, vmem, rss, utime, stime (confirm against its constructor).
// 100/200/300 form a chain; 400's parent appears to be 1 (out of tree).
ProcessStatInfo[] procInfos=new ProcessStatInfo[4];
procInfos[0]=new ProcessStatInfo(new String[]{"100","proc1","1","100","100","100000","100","1000","200"});
procInfos[1]=new ProcessStatInfo(new String[]{"200","proc2","100","100","100","200000","200","2000","400"});
procInfos[2]=new ProcessStatInfo(new String[]{"300","proc3","200","100","100","300000","300","3000","600"});
procInfos[3]=new ProcessStatInfo(new String[]{"400","proc4","1","400","400","400000","400","4000","800"});
ProcessTreeSmapMemInfo[] memInfo=new ProcessTreeSmapMemInfo[4];
memInfo[0]=new ProcessTreeSmapMemInfo("100");
memInfo[1]=new ProcessTreeSmapMemInfo("200");
memInfo[2]=new ProcessTreeSmapMemInfo("300");
memInfo[3]=new ProcessTreeSmapMemInfo("400");
createMemoryMappingInfo(memInfo);
writeStatFiles(procfsRootDir,pids,procInfos,memInfo);
Configuration conf=new Configuration();
ProcfsBasedProcessTree processTree=createProcessTree("100",procfsRootDir.getAbsolutePath());
processTree.setConf(conf);
processTree.updateProcessTree();
// 100000 + 200000 + 300000 from the three in-tree processes.
Assert.assertEquals("Cumulative virtual memory does not match",600000L,processTree.getCumulativeVmem());
// Rss counts are in pages; skip (expect 0) when the page size is unknown.
long cumuRssMem=ProcfsBasedProcessTree.PAGE_SIZE > 0 ? 600L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
Assert.assertEquals("Cumulative rss memory does not match",cumuRssMem,processTree.getCumulativeRssmem());
// utime+stime over the tree: (1000+200)+(2000+400)+(3000+600) = 7200.
long cumuCpuTime=ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0 ? 7200L * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L;
Assert.assertEquals("Cumulative cpu time does not match",cumuCpuTime,processTree.getCumulativeCpuTime());
// With smaps-based accounting enabled, rss comes from the mapping info.
setSmapsInProceTree(processTree,true);
Assert.assertEquals("Cumulative rss memory does not match",(100 * KB_TO_BYTES * 3),processTree.getCumulativeRssmem());
// Bump cpu times for two processes and re-scan: the cumulative total
// must reflect the new values — (2000+300)+(3000+500)+(3000+600) = 9400.
procInfos[0]=new ProcessStatInfo(new String[]{"100","proc1","1","100","100","100000","100","2000","300"});
procInfos[1]=new ProcessStatInfo(new String[]{"200","proc2","100","100","100","200000","200","3000","500"});
writeStatFiles(procfsRootDir,pids,procInfos,memInfo);
processTree.updateProcessTree();
cumuCpuTime=ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0 ? 9400L * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L;
Assert.assertEquals("Cumulative cpu time does not match",cumuCpuTime,processTree.getCumulativeCpuTime());
}
finally {
// Always remove the fake procfs tree.
FileUtil.fullyDelete(procfsRootDir);
}
}
APIUtilityVerifier EqualityVerifier
/**
 * Verifies RackResolver caching: "host1" resolves to /rack1 via
 * MyResolver, a second lookup returns the same (cached) result, and an
 * unresolvable host falls back to NetworkTopology.DEFAULT_RACK.
 */
@Test public void testCaching(){
  Configuration conf=new Configuration();
  conf.setClass(CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class);
  RackResolver.init(conf);
  try {
    InetAddress iaddr=InetAddress.getByName("host1");
    MyResolver.resolvedHost1=iaddr.getHostAddress();
  }
  catch (UnknownHostException ignored) {
    // Deliberately best-effort: if "host1" has no local DNS entry the
    // resolver simply matches on the raw host name instead of its address.
  }
  Node node=RackResolver.resolve("host1");
  Assert.assertEquals("/rack1",node.getNetworkLocation());
  // Second lookup should hit the cache and yield the same location.
  node=RackResolver.resolve("host1");
  Assert.assertEquals("/rack1",node.getNetworkLocation());
  // An unknown host maps to the topology's default rack.
  node=RackResolver.resolve(invalidHost);
  Assert.assertEquals(NetworkTopology.DEFAULT_RACK,node.getNetworkLocation());
}
APIUtilityVerifier NullVerifier ConditionMatcher HybridVerifier
/**
 * The factory must return a non-null tree and wire the exact Configuration
 * instance it was given into the created object.
 */
@Test public void testCreatedInstanceConfigured(){
  Configuration config=new Configuration();
  ResourceCalculatorProcessTree instance=ResourceCalculatorProcessTree.getResourceCalculatorProcessTree("1",EmptyProcessTree.class,config);
  assertNotNull(instance);
  // Identity check: the very same Configuration object, not a copy.
  assertThat(instance.getConf(),sameInstance(config));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Starts a webapp on an ephemeral port (0), records the port actually
 * bound, then restarts on that explicit port and checks it is honored.
 */
@Test public void testCreateWithPort(){
  // Port 0 asks the OS for any free port.
  WebApp webApp=WebApps.$for(this).at(0).start();
  int boundPort=webApp.getListenerAddress().getPort();
  assertTrue(boundPort > 0);
  webApp.stop();
  // Re-bind explicitly to the previously assigned port.
  webApp=WebApps.$for(this).at(boundPort).start();
  assertEquals(boundPort,webApp.getListenerAddress().getPort());
  webApp.stop();
}
APIUtilityVerifier EqualityVerifier
/**
 * Exercises the framework's default routing: /test/foo, /test/foo/index,
 * /test/foo/bar, and the various roots must all render the expected
 * controller output.
 */
@Test public void testDefaultRoutes() throws Exception {
  WebApp webApp=WebApps.$for("test",this).start();
  String base=baseUrl(webApp);
  try {
    assertEquals("foo",getContent(base + "test/foo").trim());
    assertEquals("foo",getContent(base + "test/foo/index").trim());
    assertEquals("bar",getContent(base + "test/foo/bar").trim());
    assertEquals("default",getContent(base + "test").trim());
    assertEquals("default",getContent(base + "test/").trim());
    assertEquals("default",getContent(base).trim());
  }
  finally {
    // Always release the listener, even when an assertion fails.
    webApp.stop();
  }
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Registers custom routes plus a JAX-RS web service under "ws" and
 * verifies each URL resolves to the expected controller/action output,
 * including a 404 for an unrouted path and 200 for the REST endpoint.
 */
@Test public void testCustomRoutes() throws Exception {
WebApp app=WebApps.$for("test",TestWebApp.class,this,"ws").start(new WebApp(){
@Override public void setup(){
// JAXB context + REST service backing the /ws/v1/test endpoint below.
bind(MyTestJAXBContextResolver.class);
bind(MyTestWebService.class);
// Route table for the controllers exercised by the assertions.
route("/:foo",FooController.class);
route("/bar/foo",FooController.class,"bar");
route("/foo/:foo",DefaultController.class);
route("/foo/bar/:foo",DefaultController.class,"index");
}
}
);
String baseUrl=baseUrl(app);
try {
assertEquals("foo",getContent(baseUrl).trim());
assertEquals("foo",getContent(baseUrl + "test").trim());
assertEquals("foo1",getContent(baseUrl + "test/1").trim());
assertEquals("bar",getContent(baseUrl + "test/bar/foo").trim());
assertEquals("default",getContent(baseUrl + "test/foo/bar").trim());
assertEquals("default1",getContent(baseUrl + "test/foo/1").trim());
assertEquals("default2",getContent(baseUrl + "test/foo/bar/2").trim());
// Unrouted path must 404; the REST endpoint must answer with JSON data.
assertEquals(404,getResponseCode(baseUrl + "test/goo"));
assertEquals(200,getResponseCode(baseUrl + "ws/v1/test"));
assertTrue(getContent(baseUrl + "ws/v1/test").contains("myInfo"));
}
finally {
// Always release the listener, even when an assertion fails.
app.stop();
}
}
APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies YARN webapp context wiring: /static is served by the static
 * handler (not routed to the controller), /logs returns 404 when
 * hadoop.log.dir points at a non-existent path, and the root route still
 * reaches FooController.
 */
@Test public void testYARNWebAppContext() throws Exception {
// Point the log dir at a non-existent path so /logs cannot be served.
System.setProperty("hadoop.log.dir","/Not/Existing/dir");
WebApp app=WebApps.$for("test",this).start(new WebApp(){
@Override public void setup(){
route("/",FooController.class);
}
}
);
String baseUrl=baseUrl(app);
try {
// /static must NOT fall through to the controller's "foo" output.
assertFalse("foo".equals(getContent(baseUrl + "static").trim()));
assertEquals(404,getResponseCode(baseUrl + "logs"));
assertEquals("foo",getContent(baseUrl).trim());
}
finally {
// Always release the listener, even when an assertion fails.
app.stop();
}
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier
/**
 * createMockInjector must provide mock servlet request/response bindings
 * and return the exact instance supplied for the requested type (Foo).
 */
@Test public void testCreateInjector() throws Exception {
Bar bar=new Bar();
Injector injector=WebAppTests.createMockInjector(Foo.class,bar);
// Exercise the mock servlet bindings the injector is expected to supply.
logInstances(injector.getInstance(HttpServletRequest.class),injector.getInstance(HttpServletResponse.class),injector.getInstance(HttpServletResponse.class).getWriter());
assertSame(bar,injector.getInstance(Foo.class));
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier
/**
 * An extra module passed to createMockInjector must take effect: Bar is
 * bound to the supplied FooBar instance, not to the Bar instance that was
 * used only for the Foo binding.
 */
@Test public void testCreateInjector2(){
final FooBar foobar=new FooBar();
Bar bar=new Bar();
Injector injector=WebAppTests.createMockInjector(Foo.class,bar,new AbstractModule(){
@Override protected void configure(){
// Override: requests for Bar yield the FooBar subclass instance.
bind(Bar.class).toInstance(foobar);
}
}
);
assertNotSame(bar,injector.getInstance(Bar.class));
assertSame(foobar,injector.getInstance(Bar.class));
}
APIUtilityVerifier EqualityVerifier
/**
 * loadSslConfiguration must read the provisioned SSL credentials from the
 * Configuration and push the key, keystore, and truststore passwords into
 * the supplied builder.
 */
@Test public void testLoadSslConfiguration() throws Exception {
  Configuration conf=provisionCredentialsForSSL();
  // Redundant casts removed: "new TestBuilder()" is already a TestBuilder,
  // and the variable keeps that static type after the reassignment below.
  TestBuilder builder=new TestBuilder();
  builder=(TestBuilder)WebAppUtils.loadSslConfiguration(builder,conf);
  String keypass="keypass";
  String storepass="storepass";
  String trustpass="trustpass";
  assertEquals(keypass,builder.keypass);
  assertEquals(storepass,builder.keystorePassword);
  assertEquals(trustpass,builder.truststorePassword);
}